-rw-r--r--  Documentation/DocBook/device-drivers.tmpl | 36
-rw-r--r--  Documentation/DocBook/gpu.tmpl | 38
-rw-r--r--  Documentation/arm64/silicon-errata.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/display/imx/ldb.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-dpi.txt | 2
-rw-r--r--  Documentation/filesystems/devpts.txt | 145
-rw-r--r--  Documentation/kdump/gdbmacros.txt | 93
-rw-r--r--  Documentation/networking/dsa/dsa.txt | 17
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 8
-rw-r--r--  Documentation/security/keys.txt | 5
-rw-r--r--  MAINTAINERS | 20
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/kernel/ptrace.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-ldp.c | 3
-rw-r--r--  arch/arm/mach-omap2/board-rx51-video.c | 4
-rw-r--r--  arch/arm/mach-omap2/display.c | 2
-rw-r--r--  arch/arm/mach-omap2/display.h | 5
-rw-r--r--  arch/arm/mach-omap2/dss-common.c | 2
-rw-r--r--  arch/arm64/Kconfig | 21
-rw-r--r--  arch/arm64/Kconfig.debug | 25
-rw-r--r--  arch/arm64/Makefile | 4
-rw-r--r--  arch/arm64/include/asm/elf.h | 4
-rw-r--r--  arch/arm64/include/asm/memory.h | 3
-rw-r--r--  arch/arm64/include/asm/page.h | 12
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 13
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 8
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 8
-rw-r--r--  arch/arm64/kernel/traps.c | 5
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v3-sr.c | 36
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 13
-rw-r--r--  arch/arm64/mm/dump.c | 8
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 14
-rw-r--r--  arch/parisc/include/asm/traps.h | 2
-rw-r--r--  arch/parisc/kernel/processor.c | 5
-rw-r--r--  arch/parisc/kernel/time.c | 5
-rw-r--r--  arch/parisc/kernel/unaligned.c | 13
-rw-r--r--  arch/parisc/kernel/unwind.c | 22
-rw-r--r--  arch/powerpc/include/asm/reg.h | 6
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 1
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 22
-rw-r--r--  arch/powerpc/mm/pgtable-book3s64.c | 5
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c | 23
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 49
-rw-r--r--  arch/s390/configs/default_defconfig | 44
-rw-r--r--  arch/s390/configs/gcov_defconfig | 34
-rw-r--r--  arch/s390/configs/performance_defconfig | 36
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 4
-rw-r--r--  arch/s390/defconfig | 44
-rw-r--r--  arch/s390/mm/fault.c | 1
-rw-r--r--  arch/s390/net/bpf_jit.h | 4
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 4
-rw-r--r--  arch/sparc/include/asm/head_64.h | 4
-rw-r--r--  arch/sparc/include/asm/ttable.h | 8
-rw-r--r--  arch/sparc/kernel/Makefile | 1
-rw-r--r--  arch/sparc/kernel/rtrap_64.S | 57
-rw-r--r--  arch/sparc/kernel/signal32.c | 46
-rw-r--r--  arch/sparc/kernel/signal_32.c | 41
-rw-r--r--  arch/sparc/kernel/signal_64.c | 31
-rw-r--r--  arch/sparc/kernel/sigutil_32.c | 9
-rw-r--r--  arch/sparc/kernel/sigutil_64.c | 10
-rw-r--r--  arch/sparc/kernel/urtt_fill.S | 98
-rw-r--r--  arch/sparc/mm/init_64.c | 10
-rw-r--r--  arch/x86/kernel/early-quirks.c | 404
-rw-r--r--  arch/x86/kvm/cpuid.c | 22
-rw-r--r--  arch/x86/kvm/mmu.c | 8
-rw-r--r--  arch/x86/kvm/x86.c | 12
-rw-r--r--  crypto/asymmetric_keys/Kconfig | 1
-rw-r--r--  drivers/acpi/acpi_processor.c | 9
-rw-r--r--  drivers/acpi/acpi_video.c | 9
-rw-r--r--  drivers/acpi/acpica/hwregs.c | 23
-rw-r--r--  drivers/acpi/processor_throttling.c | 9
-rw-r--r--  drivers/atm/firestream.c | 6
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c | 17
-rw-r--r--  drivers/crypto/omap-sham.c | 2
-rw-r--r--  drivers/dma-buf/Makefile | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 7
-rw-r--r--  drivers/dma-buf/fence-array.c | 144
-rw-r--r--  drivers/dma-buf/fence.c | 8
-rw-r--r--  drivers/dma-buf/reservation.c | 72
-rw-r--r--  drivers/dma-buf/sync_file.c | 2
-rw-r--r--  drivers/gpio/gpio-lpc32xx.c | 48
-rw-r--r--  drivers/gpio/gpiolib.c | 51
-rw-r--r--  drivers/gpu/drm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 16
-rw-r--r--  drivers/gpu/drm/arc/arcpgu.h | 1
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 29
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_hdmi.c | 18
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 105
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 78
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.h | 5
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 10
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 4
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 12
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi.c | 11
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 10
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 11
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 105
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 568
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 459
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 33
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 30
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 54
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 24
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 320
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 12
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 213
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 38
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 4
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 6
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 13
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 10
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 10
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 21
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 205
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 71
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 23
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 11
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.h | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 7
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 20
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 171
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 132
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 129
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 405
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 417
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 464
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 239
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 81
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 174
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 515
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1711
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 486
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 172
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 470
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 196
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 179
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 77
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 179
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 355
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 704
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 147
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1189
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 476
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 43
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 221
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 13
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 13
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 7
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 78
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 6
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 10
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 5
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 40
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 5
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 13
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 19
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_connector.c | 10
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 20
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 9
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 39
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_usif.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 9
-rw-r--r--  drivers/gpu/drm/omapdrm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/Kconfig | 28
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/Makefile | 28
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 28
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 23
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/core.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.c | 471
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.h | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc_coefs.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/display.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c | 136
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 66
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss-of.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 256
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.h | 45
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss_features.c | 46
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss_features.h | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi.h | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 22
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 21
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_common.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_phy.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_pll.c | 79
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/omapdss.h | 871
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/output.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/pll.c | 129
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/rfbi.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/sdi.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/video-pll.c | 9
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 56
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 15
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 14
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 18
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 16
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 13
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 12
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 10
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.c | 20
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 15
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi.c | 9
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 204
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 12
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 72
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 68
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.c | 7
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c | 10
-rw-r--r--  drivers/gpu/drm/sti/sti_dvo.c | 17
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 17
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 17
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c | 7
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.c | 7
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 7
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.c | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.c | 10
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tv.c | 9
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 8
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 21
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dpi.c | 9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 11
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 12
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 5
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 181
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 150
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 34
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 49
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r--  drivers/irqchip/irq-pic32-evic.c | 2
-rw-r--r--  drivers/media/platform/omap/omap_vout.c | 2
-rw-r--r--  drivers/media/platform/omap/omap_voutdef.h | 2
-rw-r--r--  drivers/media/platform/omap/omap_voutlib.c | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 4
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 9
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/alx.h | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 48
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 17
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 52
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 18
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 32
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 102
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2
-rw-r--r--  drivers/net/team/team.c | 9
-rw-r--r--  drivers/net/usb/pegasus.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 51
-rw-r--r--  drivers/net/virtio_net.c | 18
-rw-r--r--  drivers/net/vxlan.c | 3
-rw-r--r--  drivers/perf/arm_pmu.c | 12
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 5
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-nomadik.c | 2
-rw-r--r--  drivers/ptp/ptp_chardev.c | 12
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 5
-rw-r--r--  drivers/scsi/aacraid/linit.c | 11
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/scsi/sd.c | 9
-rw-r--r--  drivers/staging/android/sync.h | 3
-rw-r--r--  drivers/thermal/int340x_thermal/int3406_thermal.c | 2
-rw-r--r--  drivers/tty/Kconfig | 11
-rw-r--r--  drivers/tty/pty.c | 15
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c | 3
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 6
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c | 10
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c | 60
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c | 44
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c | 46
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c | 3
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c | 54
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c | 58
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c | 47
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c | 83
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c | 45
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c | 46
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/apply.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/core.c | 4
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dispc.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/display.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dpi.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dsi.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dss-of.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dss.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dss.h | 11
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dss_features.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/hdmi.h | 3
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c | 6
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/manager.c | 3
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/output.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/overlay.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/pll.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/rfbi.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/sdi.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/venc.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/video-pll.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb.h | 2
-rw-r--r--  fs/btrfs/extent-tree.c | 6
-rw-r--r--  fs/btrfs/extent_io.c | 10
-rw-r--r--  fs/btrfs/inode.c | 13
-rw-r--r--  fs/btrfs/ordered-data.c | 6
-rw-r--r--  fs/btrfs/ordered-data.h | 2
-rw-r--r--  fs/btrfs/reada.c | 2
-rw-r--r--  fs/btrfs/scrub.c | 50
-rw-r--r--  fs/btrfs/volumes.c | 32
-rw-r--r--  fs/cachefiles/interface.c | 2
-rw-r--r--  fs/ceph/addr.c | 6
-rw-r--r--  fs/ceph/cache.c | 141
-rw-r--r--  fs/ceph/cache.h | 44
-rw-r--r--  fs/ceph/caps.c | 23
-rw-r--r--  fs/ceph/file.c | 27
-rw-r--r--  fs/ceph/super.h | 4
-rw-r--r--  fs/devpts/inode.c | 191
-rw-r--r--  fs/fscache/page.c | 2
-rw-r--r--  fs/namei.c | 49
-rw-r--r--  include/acpi/video.h | 6
-rw-r--r--  include/drm/drmP.h | 38
-rw-r--r--  include/drm/drm_atomic.h | 82
-rw-r--r--  include/drm/drm_atomic_helper.h | 42
-rw-r--r--  include/drm/drm_crtc.h | 329
-rw-r--r--  include/drm/drm_crtc_helper.h | 3
-rw-r--r--  include/drm/drm_dp_helper.h | 1
-rw-r--r--  include/drm/drm_fb_helper.h | 11
-rw-r--r--  include/drm/drm_fourcc.h | 37
-rw-r--r--  include/drm/drm_mipi_dsi.h | 3
-rw-r--r--  include/drm/drm_modes.h | 2
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 49
-rw-r--r--  include/drm/drm_simple_kms_helper.h | 94
-rw-r--r--  include/drm/i915_drm.h | 3
-rw-r--r--  include/linux/ceph/osd_client.h | 5
-rw-r--r--  include/linux/ceph/osdmap.h | 5
-rw-r--r--  include/linux/devpts_fs.h | 9
-rw-r--r--  include/linux/dma-buf.h | 13
-rw-r--r--  include/linux/fence-array.h | 73
-rw-r--r--  include/linux/fence.h | 15
-rw-r--r--  include/linux/fscache-cache.h | 2
-rw-r--r--  include/linux/io-mapping.h | 10
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 6
-rw-r--r--  include/linux/namei.h | 2
-rw-r--r--  include/linux/page_idle.h | 43
-rw-r--r--  include/linux/platform_data/omapdss.h | 37
-rw-r--r--  include/linux/reservation.h | 53
-rw-r--r--  include/linux/sctp.h | 2
-rw-r--r--  include/linux/timekeeping.h | 3
-rw-r--r--  include/linux/vga_switcheroo.h | 2
-rw-r--r--  include/net/ip6_tunnel.h | 3
-rw-r--r--  include/net/pkt_sched.h | 1
-rw-r--r--  include/sound/omap-hdmi-audio.h | 9
-rw-r--r--  include/uapi/linux/ethtool.h | 11
-rw-r--r--  include/uapi/linux/pkt_cls.h | 4
-rw-r--r--  include/video/omap-panel-data.h | 157
-rw-r--r--  include/video/omapfb_dss.h (renamed from include/video/omapdss.h) | 80
-rw-r--r--  kernel/bpf/inode.c | 1
-rw-r--r--  kernel/irq/ipi.c | 2
-rw-r--r--  kernel/time/hrtimer.c | 1
-rw-r--r--  lib/Kconfig.debug | 3
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/test_uuid.c | 133
-rw-r--r--  lib/uuid.c | 4
-rw-r--r--  mm/memcontrol.c | 3
-rw-r--r--  mm/oom_kill.c | 7
-rw-r--r--  mm/page_alloc.c | 39
-rw-r--r--  mm/page_owner.c | 26
-rw-r--r--  mm/page_poison.c | 8
-rw-r--r--  mm/vmalloc.c | 9
-rw-r--r--  mm/vmstat.c | 2
-rw-r--r--  mm/z3fold.c | 24
-rw-r--r--  net/8021q/vlan.c | 5
-rw-r--r--  net/8021q/vlan.h | 2
-rw-r--r--  net/8021q/vlan_dev.c | 20
-rw-r--r--  net/atm/signaling.c | 2
-rw-r--r--  net/atm/svc.c | 4
-rw-r--r--  net/ceph/osd_client.c | 51
-rw-r--r--  net/ceph/osdmap.c | 4
-rw-r--r--  net/core/hwbm.c | 3
-rw-r--r--  net/core/pktgen.c | 8
-rw-r--r--  net/ieee802154/nl802154.c | 4
-rw-r--r--  net/ipv4/af_inet.c | 8
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 4
-rw-r--r--  net/ipv6/Kconfig | 9
-rw-r--r--  net/ipv6/Makefile | 2
-rw-r--r--  net/ipv6/fou6.c | 2
-rw-r--r--  net/ipv6/ip6_gre.c | 3
-rw-r--r--  net/l2tp/l2tp_ip6.c | 12
-rw-r--r--  net/lapb/lapb_in.c | 5
-rw-r--r--  net/lapb/lapb_out.c | 4
-rw-r--r--  net/lapb/lapb_subr.c | 14
-rw-r--r--  net/openvswitch/actions.c | 20
-rw-r--r--  net/sched/act_police.c | 11
-rw-r--r--  net/sched/sch_api.c | 4
-rw-r--r--  net/sched/sch_htb.c | 13
-rw-r--r--  net/sctp/sctp_diag.c | 3
-rw-r--r--  net/sctp/socket.c | 1
-rw-r--r--  net/tipc/netlink_compat.c | 111
-rwxr-xr-x  scripts/checkpatch.pl | 1
-rw-r--r--  security/keys/compat.c | 2
-rw-r--r--  security/keys/dh.c | 8
-rw-r--r--  security/keys/internal.h | 5
-rw-r--r--  security/keys/keyctl.c | 4
-rw-r--r--  sound/soc/omap/omap-hdmi-audio.c | 1
-rw-r--r--  virt/kvm/arm/hyp/vgic-v2-sr.c | 7
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c | 4
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c | 14
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c | 14
-rw-r--r--  virt/kvm/irqchip.c | 2
-rw-r--r--  virt/kvm/kvm_main.c | 22
593 files changed, 13272 insertions(+), 9738 deletions(-)
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index de79efdad46c..c3313d45f4d6 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -128,16 +128,48 @@ X!Edrivers/base/interface.c
 !Edrivers/base/platform.c
 !Edrivers/base/bus.c
      </sect1>
-     <sect1><title>Device Drivers DMA Management</title>
+     <sect1>
+       <title>Buffer Sharing and Synchronization</title>
+       <para>
+         The dma-buf subsystem provides the framework for sharing buffers
+         for hardware (DMA) access across multiple device drivers and
+         subsystems, and for synchronizing asynchronous hardware access.
+       </para>
+       <para>
+         This is used, for example, by drm "prime" multi-GPU support, but
+         is of course not limited to GPU use cases.
+       </para>
+       <para>
+         The three main components of this are: (1) dma-buf, representing
+         a sg_table and exposed to userspace as a file descriptor to allow
+         passing between devices, (2) fence, which provides a mechanism
+         to signal when one device has finished access, and (3) reservation,
+         which manages the shared or exclusive fence(s) associated with
+         the buffer.
+       </para>
+     <sect2><title>dma-buf</title>
 !Edrivers/dma-buf/dma-buf.c
+!Iinclude/linux/dma-buf.h
+     </sect2>
+     <sect2><title>reservation</title>
+!Pdrivers/dma-buf/reservation.c Reservation Object Overview
+!Edrivers/dma-buf/reservation.c
+!Iinclude/linux/reservation.h
+     </sect2>
+     <sect2><title>fence</title>
 !Edrivers/dma-buf/fence.c
-!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/fence.h
+!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/seqno-fence.h
+!Edrivers/dma-buf/fence-array.c
+!Iinclude/linux/fence-array.h
 !Edrivers/dma-buf/reservation.c
 !Iinclude/linux/reservation.h
 !Edrivers/dma-buf/sync_file.c
 !Iinclude/linux/sync_file.h
+     </sect2>
+   </sect1>
+   <sect1><title>Device Drivers DMA Management</title>
 !Edrivers/base/dma-coherent.c
 !Edrivers/base/dma-mapping.c
      </sect1>
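
The fence-array interface added above (drivers/dma-buf/fence-array.c and include/linux/fence-array.h are new in this diff) collapses a set of fences into a single fence. As an illustration only, here is a minimal sketch of how a driver might merge two fences; it assumes the 4.7 signatures in include/linux/fence-array.h, and the helper name merge_two_fences is invented:

    #include <linux/slab.h>
    #include <linux/fence.h>
    #include <linux/fence-array.h>

    /* Sketch: combine two fences into one that signals when both have. */
    static struct fence *merge_two_fences(struct fence *a, struct fence *b)
    {
        struct fence **fences;
        struct fence_array *array;

        fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
        if (!fences)
            return NULL;

        fences[0] = fence_get(a);
        fences[1] = fence_get(b);

        /* signal_on_any=false: the array signals only when all fences do */
        array = fence_array_create(2, fences, fence_context_alloc(1),
                                   1, false);
        if (!array) {
            fence_put(a);
            fence_put(b);
            kfree(fences);
            return NULL;
        }
        /* on success the fence_array owns the fences[] allocation */
        return &array->base;
    }

The returned fence can then be waited on or attached to a reservation object like any other fence.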
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 7586bf75f62e..d09536c91717 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -1018,6 +1018,11 @@ int max_width, max_height;</synopsis>
       </para>
     </sect2>
     <sect2>
+      <title>DRM Format Handling</title>
+!Iinclude/drm/drm_fourcc.h
+!Edrivers/gpu/drm/drm_fourcc.c
+    </sect2>
+    <sect2>
       <title>Dumb Buffer Objects</title>
       <para>
         The KMS API doesn't standardize backing storage object creation and
@@ -1092,22 +1097,6 @@ int max_width, max_height;</synopsis>
         operation.
       </para>
     </sect2>
-    <sect2>
-      <title>Locking</title>
-      <para>
-        Beside some lookup structures with their own locking (which is hidden
-        behind the interface functions) most of the modeset state is protected
-        by the <code>dev-&lt;mode_config.lock</code> mutex and additionally
-        per-crtc locks to allow cursor updates, pageflips and similar operations
-        to occur concurrently with background tasks like output detection.
-        Operations which cross domains like a full modeset always grab all
-        locks. Drivers there need to protect resources shared between crtcs with
-        additional locking. They also need to be careful to always grab the
-        relevant crtc locks if a modset functions touches crtc state, e.g. for
-        load detection (which does only grab the <code>mode_config.lock</code>
-        to allow concurrent screen updates on live crtcs).
-      </para>
-    </sect2>
   </sect1>
 
   <!-- Internals: kms initialization and cleanup -->
@@ -1586,7 +1575,7 @@ void intel_crt_init(struct drm_device *dev)
     </sect3>
     <sect3>
       <title>Implementing Asynchronous Atomic Commit</title>
-!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit
+!Pdrivers/gpu/drm/drm_atomic_helper.c implementing nonblocking commit
     </sect3>
     <sect3>
       <title>Atomic State Reset and Initialization</title>
@@ -1699,6 +1688,12 @@ void intel_crt_init(struct drm_device *dev)
 !Edrivers/gpu/drm/drm_panel.c
 !Pdrivers/gpu/drm/drm_panel.c drm panel
     </sect2>
+    <sect2>
+      <title>Simple KMS Helper Reference</title>
+!Iinclude/drm/drm_simple_kms_helper.h
+!Edrivers/gpu/drm/drm_simple_kms_helper.c
+!Pdrivers/gpu/drm/drm_simple_kms_helper.c overview
+    </sect2>
   </sect1>
 
   <!-- Internals: kms properties -->
@@ -2845,14 +2840,7 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
     <para>
       Drivers must initialize the vertical blanking handling core with a call to
       <function>drm_vblank_init</function> in their
-      <methodname>load</methodname> operation. The function will set the struct
-      <structname>drm_device</structname>
-      <structfield>vblank_disable_allowed</structfield> field to 0. This will
-      keep vertical blanking interrupts enabled permanently until the first mode
-      set operation, where <structfield>vblank_disable_allowed</structfield> is
-      set to 1. The reason behind this is not clear. Drivers can set the field
-      to 1 after <function>calling drm_vblank_init</function> to make vertical
-      blanking interrupts dynamically managed from the beginning.
+      <methodname>load</methodname> operation.
     </para>
     <para>
       Vertical blanking interrupts can be enabled by the DRM core or by drivers
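
The Simple KMS Helper referenced above bundles a CRTC, an encoder and a plane into a single drm_simple_display_pipe. A sketch of how a driver might wire it up, assuming the 4.7 signatures in include/drm/drm_simple_kms_helper.h; all foo_* names are placeholders:

    #include <linux/kernel.h>
    #include <drm/drm_fourcc.h>
    #include <drm/drm_simple_kms_helper.h>

    static void foo_pipe_enable(struct drm_simple_display_pipe *pipe,
                                struct drm_crtc_state *crtc_state)
    {
        /* start scanout using crtc_state->adjusted_mode */
    }

    static void foo_pipe_update(struct drm_simple_display_pipe *pipe,
                                struct drm_plane_state *old_state)
    {
        /* point the hardware at pipe->plane.state->fb, if any */
    }

    static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
        .enable = foo_pipe_enable,
        .update = foo_pipe_update,
    };

    static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

    /* registers crtc, encoder and plane in one call, bound to a connector */
    static int foo_modeset_init(struct drm_device *drm,
                                struct drm_simple_display_pipe *pipe,
                                struct drm_connector *connector)
    {
        return drm_simple_display_pipe_init(drm, pipe, &foo_pipe_funcs,
                                            foo_formats,
                                            ARRAY_SIZE(foo_formats),
                                            connector);
    }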
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index c6938e50e71f..4da60b463995 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -56,6 +56,7 @@ stable kernels.
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
+| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144    |
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
 | Cavium         | ThunderX SMMUv2 | #27704          | N/A                     |
diff --git a/Documentation/devicetree/bindings/display/imx/ldb.txt b/Documentation/devicetree/bindings/display/imx/ldb.txt
index 0a175d991b52..a407462c885e 100644
--- a/Documentation/devicetree/bindings/display/imx/ldb.txt
+++ b/Documentation/devicetree/bindings/display/imx/ldb.txt
@@ -62,6 +62,7 @@ Required properties:
   display-timings are used instead.
 
 Optional properties (required if display-timings are used):
+ - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
  - display-timings : A node that describes the display timings as defined in
    Documentation/devicetree/bindings/display/display-timing.txt.
  - fsl,data-mapping : should be "spwg" or "jeida"
diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
index 216c894d4f99..b52ac52757df 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
+++ b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
@@ -7,6 +7,8 @@ Required properties:
 Optional properties:
 - label: a symbolic name for the panel
 - enable-gpios: panel enable gpio
+- reset-gpios: GPIO to control the RESET pin
+- vcc-supply: phandle of regulator that will be used to enable power to the display
 
 Required nodes:
 - "panel-timing" containing video timings
diff --git a/Documentation/filesystems/devpts.txt b/Documentation/filesystems/devpts.txt
index 30d2fcb32f72..9f94fe276dea 100644
--- a/Documentation/filesystems/devpts.txt
+++ b/Documentation/filesystems/devpts.txt
@@ -1,141 +1,26 @@
+Each mount of the devpts filesystem is now distinct such that ptys
+and their indices allocated in one mount are independent from ptys
+and their indices in all other mounts.
 
-To support containers, we now allow multiple instances of devpts filesystem,
-such that indices of ptys allocated in one instance are independent of indices
-allocated in other instances of devpts.
+All mounts of the devpts filesystem now create a /dev/pts/ptmx node
+with permissions 0000.
 
-To preserve backward compatibility, this support for multiple instances is
-enabled only if:
+To retain backwards compatibility, a ptmx device node (aka any node
+created with "mknod name c 5 2") when opened will look for an instance
+of devpts under the name "pts" in the same directory as the ptmx device
+node.
 
-   - CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
-   - '-o newinstance' mount option is specified while mounting devpts
-
-IOW, devpts now supports both single-instance and multi-instance semantics.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
-this referred to as the "legacy" mode. In this mode, the new mount options
-(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
-on console.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
-'newinstance' option (as in current start-up scripts) the new mount binds
-to the initial kernel mount of devpts. This mode is referred to as the
-'single-instance' mode and the current, single-instance semantics are
-preserved, i.e PTYs are common across the system.
-
-The only difference between this single-instance mode and the legacy mode
-is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
-can safely be ignored.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
-the mount is considered to be in the multi-instance mode and a new instance
-of the devpts fs is created. Any ptys created in this instance are independent
-of ptys in other instances of devpts. Like in the single-instance mode, the
-/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
-open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
-bind-mount.
-
-Eg: A container startup script could do the following:
-
-   $ chmod 0666 /dev/pts/ptmx
-   $ rm /dev/ptmx
-   $ ln -s pts/ptmx /dev/ptmx
-   $ ns_exec -cm /bin/bash
-
-   # We are now in new container
-
-   $ umount /dev/pts
-   $ mount -t devpts -o newinstance lxcpts /dev/pts
-   $ sshd -p 1234
-
-where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
-/bin/bash in the child process. A pty created by the sshd is not visible in
-the original mount of /dev/pts.
+As an option instead of placing a /dev/ptmx device node at /dev/ptmx
+it is possible to place a symlink to /dev/pts/ptmx at /dev/ptmx or
+to bind mount /dev/pts/ptmx to /dev/ptmx. If you opt for using
+the devpts filesystem in this manner devpts should be mounted with
+ptmxmode=0666, or chmod 0666 /dev/pts/ptmx should be called.
 
 Total count of pty pairs in all instances is limited by sysctls:
 kernel.pty.max = 4096          - global limit
-kernel.pty.reserve = 1024      - reserve for initial instance
+kernel.pty.reserve = 1024      - reserved for filesystems mounted from the initial mount namespace
 kernel.pty.nr                  - current count of ptys
 
 Per-instance limit could be set by adding mount option "max=<count>".
 This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve.
 In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit.
-
-User-space changes
-------------------
-
-In multi-instance mode (i.e '-o newinstance' mount option is specified at least
-once), following user-space issues should be noted.
-
-1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
-   and no change is needed to system-startup scripts.
-
-2. To effectively use multi-instance mode (i.e -o newinstance is specified)
-   administrators or startup scripts should "redirect" open of /dev/ptmx to
-   /dev/pts/ptmx using either a bind mount or symlink.
-
-   $ mount -t devpts -o newinstance devpts /dev/pts
-
-   followed by either
-
-   $ rm /dev/ptmx
-   $ ln -s pts/ptmx /dev/ptmx
-   $ chmod 666 /dev/pts/ptmx
-   or
-   $ mount -o bind /dev/pts/ptmx /dev/ptmx
-
-3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
-   enables better error-reporting and treats both single-instance and
-   multi-instance mounts similarly.
-
-   But this method requires that system-startup scripts set the mode of
-   /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
-   mode by, either
-
-   - adding ptmxmode mount option to devpts entry in /etc/fstab, or
-   - using 'chmod 0666 /dev/pts/ptmx'
-
-4. If multi-instance mode mount is needed for containers, but the system
-   startup scripts have not yet been updated, container-startup scripts
-   should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
-   instance mounts.
-
-   Or, in general, container-startup scripts should use:
-
-   mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
-   if [ ! -L /dev/ptmx ]; then
-      mount -o bind /dev/pts/ptmx /dev/ptmx
-   fi
-
-   When all devpts mounts are multi-instance, /dev/ptmx can permanently be
-   a symlink to pts/ptmx and the bind mount can be ignored.
-
-5. A multi-instance mount that is not accompanied by the /dev/ptmx to
-   /dev/pts/ptmx redirection would result in an unusable/unreachable pty.
-
-   mount -t devpts -o newinstance lxcpts /dev/pts
-
-   immediately followed by:
-
-   open("/dev/ptmx")
-
-   would create a pty, say /dev/pts/7, in the initial kernel mount.
-   But /dev/pts/7 would be invisible in the new mount.
-
-6. The permissions for /dev/pts/ptmx node should be specified when mounting
-   /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
-
-   mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
-
-   The permissions can be later be changed as usual with 'chmod'.
-
-   chmod 666 /dev/pts/ptmx
-
-7. A mount of devpts without the 'newinstance' option results in binding to
-   initial kernel mount. This behavior while preserving legacy semantics,
-   does not provide strict isolation in a container environment. i.e by
-   mounting devpts without the 'newinstance' option, a container could
-   get visibility into the 'host' or root container's devpts.
-
-   To workaround this and have strict isolation, all mounts of devpts,
-   including the mount in the root container, should use the newinstance
-   option.
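
For illustration, the setup the new text recommends, expressed as the equivalent system calls a container init might make; this is a sketch, not part of the patch, and the max=1024 limit is only an example of the per-instance option:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/mount.h>

    static int setup_devpts(void)
    {
        /* every devpts mount is now an independent instance */
        if (mount("devpts", "/dev/pts", "devpts", 0,
                  "ptmxmode=0666,max=1024") < 0) {
            perror("mount devpts");
            return -1;
        }
        /* route opens of /dev/ptmx into this instance's pts/ptmx */
        unlink("/dev/ptmx");
        if (symlink("pts/ptmx", "/dev/ptmx") < 0) {
            perror("symlink ptmx");
            return -1;
        }
        return 0;
    }

Mounting with ptmxmode=0666 avoids the separate chmod of /dev/pts/ptmx that the text mentions.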
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt
index 35f6a982a0d5..220d0a80ca2c 100644
--- a/Documentation/kdump/gdbmacros.txt
+++ b/Documentation/kdump/gdbmacros.txt
@@ -170,21 +170,92 @@ document trapinfo
     address the kernel panicked.
 end
 
+define dump_log_idx
+    set $idx = $arg0
+    if ($argc > 1)
+        set $prev_flags = $arg1
+    else
+        set $prev_flags = 0
+    end
+    set $msg = ((struct printk_log *) (log_buf + $idx))
+    set $prefix = 1
+    set $newline = 1
+    set $log = log_buf + $idx + sizeof(*$msg)
 
-define dmesg
-    set $i = 0
-    set $end_idx = (log_end - 1) & (log_buf_len - 1)
+    # prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
+    if (($prev_flags & 8) && !($msg->flags & 4))
+        set $prefix = 0
+    end
+
+    # msg->flags & LOG_CONT
+    if ($msg->flags & 8)
+        # (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+        if (($prev_flags & 8) && !($prev_flags & 2))
+            set $prefix = 0
+        end
+        # (!(msg->flags & LOG_NEWLINE))
+        if (!($msg->flags & 2))
+            set $newline = 0
+        end
+    end
+
+    if ($prefix)
+        printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
+    end
+    if ($msg->text_len != 0)
+        eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
+    end
+    if ($newline)
+        printf "\n"
+    end
+    if ($msg->dict_len > 0)
+        set $dict = $log + $msg->text_len
+        set $idx = 0
+        set $line = 1
+        while ($idx < $msg->dict_len)
+            if ($line)
+                printf " "
+                set $line = 0
+            end
+            set $c = $dict[$idx]
+            if ($c == '\0')
+                printf "\n"
+                set $line = 1
+            else
+                if ($c < ' ' || $c >= 127 || $c == '\\')
+                    printf "\\x%02x", $c
+                else
+                    printf "%c", $c
+                end
+            end
+            set $idx = $idx + 1
+        end
+        printf "\n"
+    end
+end
+document dump_log_idx
+    Dump a single log entry given its index in the log buffer. The first
+    parameter is the index into log_buf, the second is optional and
+    specifies the previous log entry's flags, used for properly
+    formatting continued lines.
+end
 
-    while ($i < logged_chars)
-        set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
+define dmesg
+    set $i = log_first_idx
+    set $end_idx = log_first_idx
+    set $prev_flags = 0
 
-        if ($idx + 100 <= $end_idx) || \
-           ($end_idx <= $idx && $idx + 100 < log_buf_len)
-            printf "%.100s", &log_buf[$idx]
-            set $i = $i + 100
-        else
-            printf "%c", log_buf[$idx]
-            set $i = $i + 1
+    while (1)
+        set $msg = ((struct printk_log *) (log_buf + $i))
+        if ($msg->len == 0)
+            set $i = 0
+        else
+            dump_log_idx $i $prev_flags
+            set $i = $i + $msg->len
+            set $prev_flags = $msg->flags
+        end
+        if ($i == $end_idx)
+            loop_break
         end
     end
 end
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 631b0f7ae16f..9d05ed7f7da5 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -369,8 +369,6 @@ does not allocate any driver private context space.
 Switch configuration
 --------------------
 
-- priv_size: additional size needed by the switch driver for its private context
-
 - tag_protocol: this is to indicate what kind of tagging protocol is supported,
   should be a valid value from the dsa_tag_protocol enum
 
@@ -416,11 +414,6 @@ PHY devices and link management
   to the switch port MDIO registers. If unavailable return a negative error
   code.
 
-- poll_link: Function invoked by DSA to query the link state of the switch
-  builtin Ethernet PHYs, per port. This function is responsible for calling
-  netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a
-  single call. Executes from workqueue context.
-
 - adjust_link: Function invoked by the PHY library when a slave network device
   is attached to a PHY device. This function is responsible for appropriately
   configuring the switch port link parameters: speed, duplex, pause based on
@@ -542,6 +535,16 @@ Bridge layer
 Bridge VLAN filtering
 ---------------------
 
+- port_vlan_filtering: bridge layer function invoked when the bridge gets
+  configured for turning on or off VLAN filtering. If nothing specific needs to
+  be done at the hardware level, this callback does not need to be implemented.
+  When VLAN filtering is turned on, the hardware must be programmed to reject
+  802.1Q frames whose VLAN IDs are outside the programmed allowed VLAN ID
+  map/rules. If there is no PVID programmed into the switch port, untagged
+  frames must be rejected as well. When turned off, the switch must accept
+  any 802.1Q frames irrespective of their VLAN ID, and untagged frames are
+  allowed.
+
 - port_vlan_prepare: bridge layer function invoked when the bridge prepares the
   configuration of a VLAN on the given port. If the operation is not supported
   by the hardware, this function should return -EOPNOTSUPP to inform the bridge
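
To make the port_vlan_filtering contract above concrete, a sketch of a driver implementation: the foo_* register layout and helpers are invented, and only the callback signature follows 4.7's struct dsa_switch_driver (drivers attach their own context through ds->priv now that priv_size is gone):

    #include <net/dsa.h>

    /* invented register layout, for illustration only */
    #define FOO_PORT_CTRL(p)     (0x100 + 4 * (p))
    #define FOO_8021Q_MODE_MASK  0x3
    #define FOO_8021Q_DISABLED   0x0
    #define FOO_8021Q_SECURE     0x3

    struct foo_priv;
    int foo_rmw(struct foo_priv *priv, u32 reg, u32 mask, u32 val);

    static int foo_port_vlan_filtering(struct dsa_switch *ds, int port,
                                       bool vlan_filtering)
    {
        struct foo_priv *priv = ds->priv;

        /*
         * On: drop frames whose VID is not in the programmed VLAN table,
         * and drop untagged frames unless the port has a PVID.
         * Off: accept all 802.1Q and untagged frames.
         */
        return foo_rmw(priv, FOO_PORT_CTRL(port), FOO_8021Q_MODE_MASK,
                       vlan_filtering ? FOO_8021Q_SECURE
                                      : FOO_8021Q_DISABLED);
    }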
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 6c7f365b1515..9ae929395b24 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1036,15 +1036,17 @@ proxy_arp_pvlan - BOOLEAN
 
 shared_media - BOOLEAN
     Send(router) or accept(host) RFC1620 shared media redirects.
-    Overrides ip_secure_redirects.
+    Overrides secure_redirects.
     shared_media for the interface will be enabled if at least one of
     conf/{all,interface}/shared_media is set to TRUE,
     it will be disabled otherwise
     default TRUE
 
 secure_redirects - BOOLEAN
-    Accept ICMP redirect messages only for gateways,
-    listed in default gateway list.
+    Accept ICMP redirect messages only to gateways listed in the
+    interface's current gateway list. Even if disabled, RFC1122 redirect
+    rules still apply.
+    Overridden by shared_media.
     secure_redirects for the interface will be enabled if at least one of
     conf/{all,interface}/secure_redirects is set to TRUE,
     it will be disabled otherwise
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 20d05719bceb..3849814bfe6d 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -826,7 +826,8 @@ The keyctl syscall functions are:
  (*) Compute a Diffie-Hellman shared secret or public key
 
        long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
-                   char *buffer, size_t buflen);
+                   char *buffer, size_t buflen,
+                   void *reserved);
 
      The params struct contains serial numbers for three keys:
 
@@ -843,6 +844,8 @@ The keyctl syscall functions are:
      public key. If the base is the remote public key, the result is
      the shared secret.
 
+     The reserved argument must be set to NULL.
+
      The buffer length must be at least the length of the prime, or zero.
 
      If the buffer length is nonzero, the length of the result is
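
The extended call from userspace, as a short sketch; KEYCTL_DH_COMPUTE and struct keyctl_dh_params come from linux/keyctl.h as of 4.7, and the key serials in params are whatever the caller has loaded:

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/keyctl.h>

    /* params->private, params->prime and params->base are key serials */
    static long dh_compute(struct keyctl_dh_params *params,
                           char *buffer, size_t buflen)
    {
        /* the new fifth argument is reserved and must be NULL */
        return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE,
                       params, buffer, buflen, NULL);
    }

Per the text above, calling with buflen == 0 returns the minimum required buffer length.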
diff --git a/MAINTAINERS b/MAINTAINERS
index 7304d2e37a98..cb88f724e07c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3854,6 +3854,9 @@ T:	git git://people.freedesktop.org/~airlied/linux
 S:	Maintained
 F:	drivers/gpu/drm/
 F:	drivers/gpu/vga/
+F:	Documentation/devicetree/bindings/display/
+F:	Documentation/devicetree/bindings/gpu/
+F:	Documentation/devicetree/bindings/video/
 F:	Documentation/DocBook/gpu.*
 F:	include/drm/
 F:	include/uapi/drm/
@@ -4101,6 +4104,21 @@ F:	drivers/gpu/drm/vc4/
 F:	include/uapi/drm/vc4_drm.h
 F:	Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
 
+DRM DRIVERS FOR TI OMAP
+M:	Tomi Valkeinen <tomi.valkeinen@ti.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+F:	drivers/gpu/drm/omapdrm/
+F:	Documentation/devicetree/bindings/display/ti/
+
+DRM DRIVERS FOR TI LCDC
+M:	Jyri Sarha <jsarha@ti.com>
+R:	Tomi Valkeinen <tomi.valkeinen@ti.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+F:	drivers/gpu/drm/tilcdc/
+F:	Documentation/devicetree/bindings/display/tilcdc/
+
 DSBR100 USB FM RADIO DRIVER
 M:	Alexey Klimov <klimov.linux@gmail.com>
 L:	linux-media@vger.kernel.org
@@ -7989,6 +8007,7 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
 S:	Odd Fixes
+F:	Documentation/devicetree/bindings/net/
 F:	drivers/net/
 F:	include/linux/if_*
 F:	include/linux/netdevice.h
@@ -8944,6 +8963,7 @@ M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/pinctrl/
 F:	drivers/pinctrl/
 F:	include/linux/pinctrl/
 
diff --git a/Makefile b/Makefile
index 8908a51b895a..801457b847a4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ef9119f7462e..4d9375814b53 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
 	if (ret)
 		return ret;
 
-	vfp_flush_hwstate(thread);
 	thread->vfpstate.hard = new_vfp;
+	vfp_flush_hwstate(thread);
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index d9c3ffc39329..390795b334c3 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -39,7 +39,7 @@
39#include "gpmc.h" 39#include "gpmc.h"
40#include "gpmc-smsc911x.h" 40#include "gpmc-smsc911x.h"
41 41
42#include <video/omapdss.h> 42#include <linux/platform_data/omapdss.h>
43#include <video/omap-panel-data.h> 43#include <video/omap-panel-data.h>
44 44
45#include "board-flash.h" 45#include "board-flash.h"
@@ -47,6 +47,7 @@
 #include "hsmmc.h"
 #include "control.h"
 #include "common-board-devices.h"
+#include "display.h"
 
 #define LDP_SMSC911X_CS		1
 #define LDP_SMSC911X_GPIO	152
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index 9cfebc5c7455..180c6aa633bd 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -15,13 +15,14 @@
 #include <linux/spi/spi.h>
 #include <linux/mm.h>
 #include <asm/mach-types.h>
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
 #include <video/omap-panel-data.h>
 
 #include <linux/platform_data/spi-omap2-mcspi.h>
 
 #include "soc.h"
 #include "board-rx51.h"
+#include "display.h"
 
 #include "mux.h"
 
@@ -32,7 +33,6 @@
 static struct connector_atv_platform_data rx51_tv_pdata = {
 	.name = "tv",
 	.source = "venc.0",
-	.connector_type = OMAP_DSS_VENC_TYPE_COMPOSITE,
 	.invert_polarity = false,
 };
 
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 6ab13d18c636..70b3eaf085e4 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -29,7 +29,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
 #include "omap_hwmod.h"
 #include "omap_device.h"
 #include "omap-pm.h"
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
index 7375854b16c7..78f253005279 100644
--- a/arch/arm/mach-omap2/display.h
+++ b/arch/arm/mach-omap2/display.h
@@ -33,4 +33,9 @@ int omap_init_vout(void);
 
 struct device_node * __init omapdss_find_dss_of_node(void);
 
+struct omap_dss_board_info;
+
+/* Init with the board info */
+int omap_display_init(struct omap_dss_board_info *board_data);
+
 #endif
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index ea2be0f5953b..1d583bc0b1a9 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -27,7 +27,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
 #include <video/omap-panel-data.h>
 
 #include "soc.h"
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 76747d92bc72..5a0a691d4220 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT
 config MMU
 	def_bool y
 
+config ARM64_PAGE_SHIFT
+	int
+	default 16 if ARM64_64K_PAGES
+	default 14 if ARM64_16K_PAGES
+	default 12
+
+config ARM64_CONT_SHIFT
+	int
+	default 5 if ARM64_64K_PAGES
+	default 7 if ARM64_16K_PAGES
+	default 4
+
 config ARCH_MMAP_RND_BITS_MIN
 	default 14 if ARM64_64K_PAGES
 	default 16 if ARM64_16K_PAGES
@@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375
 
 	  If unsure, say Y.
 
+config CAVIUM_ERRATUM_23144
+	bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+	depends on NUMA
+	default y
+	help
+	  The ITS SYNC command may hang for cross-node I/O and
+	  collections/CPU mapping.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_23154
 	bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
 	default y
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 710fde4ad0f0..0cc758cdd0dc 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -12,7 +12,8 @@ config ARM64_PTDUMP
 	  who are working in architecture specific areas of the kernel.
 	  It is probably not a good idea to enable this feature in a production
 	  kernel.
-	  If in doubt, say "N"
+
+	  If in doubt, say N.
 
 config PID_IN_CONTEXTIDR
 	bool "Write the current PID to the CONTEXTIDR register"
@@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
 	  value.
 
 config DEBUG_SET_MODULE_RONX
 	bool "Set loadable kernel module data as NX and text as RO"
 	depends on MODULES
-	help
-	  This option helps catch unintended modifications to loadable
-	  kernel module's text and read-only data. It also prevents execution
-	  of module data. Such protection may interfere with run-time code
-	  patching and dynamic kernel tracing - and they might also protect
-	  against certain classes of kernel exploits.
-	  If in doubt, say "N".
+	default y
+	help
+	  If this is set, kernel module text and rodata will be made read-only.
+	  This is to help catch accidental or malicious attempts to change the
+	  kernel's executable code.
+
+	  If in doubt, say Y.
 
 config DEBUG_RODATA
 	bool "Make kernel text and rodata read-only"
@@ -56,7 +57,7 @@ config DEBUG_RODATA
 	  is to help catch accidental or malicious attempts to change the
 	  kernel's executable code.
 
-	  If in doubt, say Y
+	  If in doubt, say Y.
 
 config DEBUG_ALIGN_RODATA
 	depends on DEBUG_RODATA
@@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA
 	  alignment and potentially wasted space. Turn on this option if
 	  performance is more important than memory pressure.
 
-	  If in doubt, say N
+	  If in doubt, say N.
 
 source "drivers/hwtracing/coresight/Kconfig"
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 354d75402ace..7085e322dc42 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -60,7 +60,9 @@ head-y := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
 else
 TEXT_OFFSET := 0x00080000
 endif
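
The new expression picks a random page index below 2 MiB and scales it back up by the configured page size, so the offset is page-aligned for any granule. A minimal user-space C sketch of the same computation (the 4K page_shift below is an assumed stand-in for CONFIG_ARM64_PAGE_SHIFT, not taken from the Makefile):

/* sketch of the randomized TEXT_OFFSET computation above; not kernel code */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	const unsigned long page_shift = 12;		/* assumed: 4K granule */
	const unsigned long limit = 2 * 1024 * 1024;	/* offset stays below 2 MiB */
	const unsigned long pages = limit >> page_shift;
	unsigned long text_offset;

	srand(time(NULL));
	/* random page index, scaled back to a byte offset: always page-aligned */
	text_offset = ((unsigned long)rand() % pages) << page_shift;
	printf("TEXT_OFFSET := 0x%06lx\n", text_offset);
	return 0;
}
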
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 7a09c48c0475..579b6e654f2d 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define STACK_RND_MASK			(0x3ffff >> (PAGE_SHIFT - 12))
 #endif
 
-#ifdef CONFIG_COMPAT
-
 #ifdef __AARCH64EB__
 #define COMPAT_ELF_PLATFORM		("v8b")
 #else
 #define COMPAT_ELF_PLATFORM		("v8l")
 #endif
 
+#ifdef CONFIG_COMPAT
+
 #define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 72a3025bb583..31b73227b41f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -55,8 +55,9 @@
 #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * PAGE_OFFSET - the virtual address of the start of the linear map (top
  *		 (VA_BITS - 1))
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image
  * VA_BITS - the maximum number of bits for virtual addresses.
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 17b45f7d96d3..8472c6def5ef 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -23,16 +23,8 @@
 
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define PAGE_SHIFT		16
-#define CONT_SHIFT		5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define PAGE_SHIFT		14
-#define CONT_SHIFT		7
-#else
-#define PAGE_SHIFT		12
-#define CONT_SHIFT		4
-#endif
+#define PAGE_SHIFT		CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT		CONFIG_ARM64_CONT_SHIFT
 #define PAGE_SIZE		(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK		(~(PAGE_SIZE-1))
 
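
With the table moved into Kconfig, every page-size constant derives from the two integers. A small stand-alone C sketch of that derivation, using the ARM64_PAGE_SHIFT/ARM64_CONT_SHIFT default pairs added earlier in this commit:

/* sketch: derive PAGE_SIZE, PAGE_MASK and the contiguous range; not kernel code */
#include <stdio.h>

int main(void)
{
	static const struct { int page_shift, cont_shift; } cfg[] = {
		{ 12, 4 },	/* 4K pages,  16 contiguous PTEs */
		{ 14, 7 },	/* 16K pages, 128 contiguous PTEs */
		{ 16, 5 },	/* 64K pages, 32 contiguous PTEs */
	};

	for (int i = 0; i < 3; i++) {
		unsigned long page_size = 1UL << cfg[i].page_shift;
		unsigned long page_mask = ~(page_size - 1);
		unsigned long cont_size = page_size << cfg[i].cont_shift;

		printf("PAGE_SIZE=%lu KiB PAGE_MASK=%#lx cont range=%lu KiB\n",
		       page_size >> 10, page_mask, cont_size >> 10);
	}
	return 0;
}
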
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74572af..9e397a542756 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -81,19 +81,6 @@ static inline void set_fs(mm_segment_t fs)
 #define segment_eq(a, b)	((a) == (b))
 
 /*
- * Return 1 if addr < current->addr_limit, 0 otherwise.
- */
-#define __addr_ok(addr)							\
-({									\
-	unsigned long flag;						\
-	asm("cmp %1, %0; cset %0, lo"					\
-		: "=&r" (flag)						\
-		: "r" (addr), "0" (current_thread_info()->addr_limit)	\
-		: "cc");						\
-	flag;								\
-})
-
-/*
  * Test whether a block of memory is a valid user space address.
  * Returns 1 if the range is valid, 0 otherwise.
  *
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 41e58fe3c041..e78ac26324bd 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		390
+#define __NR_compat_syscalls		394
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 5b925b761a2a..b7e8ef16ff0d 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
 __SYSCALL(__NR_userfaultfd, sys_userfaultfd)
 #define __NR_membarrier 389
 __SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 390
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 391
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 392
+__SYSCALL(__NR_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 393
+__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
 
 /*
  * Please add new compat syscalls above this comment and update
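
The count only moves from 390 to 394 because four entries (390-393) are appended: it is always one past the highest syscall number. A hypothetical compile-time check of that bookkeeping rule (the kernel carries no such assert; the two macros are repeated here purely for illustration):

/* hypothetical sanity check, not part of the kernel sources */
#define __NR_pwritev2		393
#define __NR_compat_syscalls	394

_Static_assert(__NR_pwritev2 + 1 == __NR_compat_syscalls,
	       "__NR_compat_syscalls must be one past the highest entry");
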
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 3808470486f3..c173d329397f 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -22,6 +22,8 @@
 
 #include <linux/bitops.h>
 #include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/personality.h>
@@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
 static int c_show(struct seq_file *m, void *v)
 {
 	int i, j;
+	bool compat = personality(current->personality) == PER_LINUX32;
 
 	for_each_online_cpu(i) {
 		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
@@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
 		 * "processor".  Give glibc what it expects.
 		 */
 		seq_printf(m, "processor\t: %d\n", i);
+		if (compat)
+			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
 
 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
 			   loops_per_jiffy / (500000UL/HZ),
@@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
 		 * software which does already (at least for 32-bit).
 		 */
 		seq_puts(m, "Features\t:");
-		if (personality(current->personality) == PER_LINUX32) {
+		if (compat) {
 #ifdef CONFIG_COMPAT
 			for (j = 0; compat_hwcap_str[j]; j++)
 				if (compat_elf_hwcap & (1 << j))
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c5392081b49b..f7cf463107df 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 	void __user *pc = (void __user *)instruction_pointer(regs);
 	console_verbose();
 
-	pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
-		handler[reason], esr, esr_get_class_string(esr));
+	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+		handler[reason], smp_processor_id(), esr,
+		esr_get_class_string(esr));
 	__show_regs(regs);
 
 	info.si_signo = SIGILL;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index fff7cd42b3a3..5f8f80b4a224 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	 * Make sure stores to the GIC via the memory mapped interface
 	 * are now visible to the system register interface.
 	 */
-	dsb(st);
+	if (!cpu_if->vgic_sre)
+		dsb(st);
 
 	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
 
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
 			continue;
 
-		if (cpu_if->vgic_elrsr & (1 << i)) {
+		if (cpu_if->vgic_elrsr & (1 << i))
 			cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
-			continue;
-		}
+		else
+			cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 
-		cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 		__gic_v3_set_lr(0, i);
 	}
 
@@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
 	val = read_gicreg(ICC_SRE_EL2);
 	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
-	isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
-	write_gicreg(1, ICC_SRE_EL1);
+
+	if (!cpu_if->vgic_sre) {
+		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+		isb();
+		write_gicreg(1, ICC_SRE_EL1);
+	}
 }
 
 void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * been actually programmed with the value we want before
 	 * starting to mess with the rest of the GIC.
 	 */
-	write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
-	isb();
+	if (!cpu_if->vgic_sre) {
+		write_gicreg(0, ICC_SRE_EL1);
+		isb();
+	}
 
 	val = read_gicreg(ICH_VTR_EL2);
 	max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * (re)distributors. This ensures the guest will read the
 	 * correct values from the memory-mapped interface.
 	 */
-	isb();
-	dsb(sy);
+	if (!cpu_if->vgic_sre) {
+		isb();
+		dsb(sy);
+	}
 	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 
 	/*
 	 * Prevent the guest from touching the GIC system registers if
 	 * SRE isn't enabled for GICv3 emulation.
 	 */
-	if (!cpu_if->vgic_sre) {
-		write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
-			     ICC_SRE_EL2);
-	}
+	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+		     ICC_SRE_EL2);
 }
 
 void __hyp_text __vgic_v3_init_lrs(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7bbe3ff02602..a57d650f552c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+			   struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+	return true;
+}
+
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  access_gic_sgi },
 	/* ICC_SRE_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
-	  trap_raz_wi },
+	  access_gic_sre },
 
 	/* CONTEXTIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 8404190fe2bd..ccfde237d6e6 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = {
 
 struct pg_level {
 	const struct prot_bits *bits;
+	const char *name;
 	size_t num;
 	u64 mask;
 };
@@ -157,15 +158,19 @@ struct pg_level {
 static struct pg_level pg_level[] = {
 	{
 	}, { /* pgd */
+		.name	= "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pud */
+		.name	= (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pmd */
+		.name	= (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pte */
+		.name	= "PTE",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	},
@@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 			delta >>= 10;
 			unit++;
 		}
-		seq_printf(st->seq, "%9lu%c", delta, *unit);
+		seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+			   pg_level[st->level].name);
 		if (pg_level[st->level].bits)
 			dump_prot(st, pg_level[st->level].bits,
 				  pg_level[st->level].num);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index aa8aee7d6929..2e49bd252fe7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt)
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
+		hugetlb_add_hstate(CONT_PTE_SHIFT);
+	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
+		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		hugetlb_bad_size();
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+		hugetlb_add_hstate(CONT_PMD_SHIFT);
+	return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
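
The two new branches accept the sizes that the contiguous-PTE and contiguous-PMD hints can map. A user-space sketch of the arithmetic for a 4K granule, using CONT_SHIFT=4 from the new Kconfig entry (the CONT_PMD_SHIFT=4 value is an assumption here, not something this diff defines):

/* sketch of the hugepagesz= values accepted above for 4K pages; not kernel code */
#include <stdio.h>

int main(void)
{
	const unsigned long page_shift = 12, pmd_shift = 21;	/* 4K granule */
	const unsigned long cont_pte_shift = 4;			/* from Kconfig above */
	const unsigned long cont_pmd_shift = 4;			/* assumed */

	unsigned long page_size = 1UL << page_shift;
	unsigned long pmd_size = 1UL << pmd_shift;

	/* ps == PAGE_SIZE * CONT_PTES: a block of 16 contiguous PTEs */
	printf("cont-PTE hugepage: %lu KiB\n", (page_size << cont_pte_shift) >> 10);
	/* ps == PMD_SIZE * CONT_PMDS: a block of 16 contiguous PMDs */
	printf("cont-PMD hugepage: %lu MiB\n", (pmd_size << cont_pmd_shift) >> 20);
	return 0;
}

That yields 64 KiB and 32 MiB respectively, alongside the existing PMD (2 MiB) and PUD (1 GiB) sizes.
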
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 4736020ba5ea..5e953ab4530d 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -8,6 +8,8 @@ struct pt_regs;
 void parisc_terminate(char *msg, struct pt_regs *regs,
 		int code, unsigned long offset) __noreturn __cold;
 
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
 		unsigned long address);
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index e81ccf1716e9..5adc339eb7c8 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -324,8 +324,9 @@ int init_per_cpu(int cpunum)
 	per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
 	per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
-	printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
-		cpunum, coproc_cfg.revision, coproc_cfg.model);
+	if (cpunum == 0)
+		printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
+			cpunum, coproc_cfg.revision, coproc_cfg.model);
 
 	/*
 	** store status register to stack (hopefully aligned)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 58dd6801f5be..31ec99a5f119 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -309,11 +309,6 @@ void __init time_init(void)
 	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
 				NSEC_PER_MSEC, 0);
 
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
-	/* At bootup only one 64bit CPU is online and cr16 is "stable" */
-	set_sched_clock_stable();
-#endif
-
 	start_cpu_itimer();	/* get CPU 0 started */
 
 	/* register at clocksource framework */
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index d7c0acb35ec2..2b65c0177778 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -28,6 +28,7 @@
 #include <linux/ratelimit.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
+#include <asm/traps.h>
 
 /* #define DEBUG_UNALIGNED 1 */
 
@@ -130,8 +131,6 @@
 
 int unaligned_enabled __read_mostly = 1;
 
-void die_if_kernel (char *str, struct pt_regs *regs, long err);
-
 static int emulate_ldh(struct pt_regs *regs, int toreg)
 {
 	unsigned long saddr = regs->ior;
@@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs)
 		break;
 	}
 
-	if (modify && R1(regs->iir))
+	if (ret == 0 && modify && R1(regs->iir))
 		regs->gr[R1(regs->iir)] = newbase;
 
 
@@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs)
 
 	if (ret)
 	{
+		/*
+		 * The unaligned handler failed.
+		 * If we were called by __get_user() or __put_user() jump
+		 * to its exception fixup handler instead of crashing.
+		 */
+		if (!user_mode(regs) && fixup_exception(regs))
+			return;
+
 		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
 		die_if_kernel("Unaligned data reference", regs, 28);
 
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index ddd988b267a9..e278a87f43cc 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr)
 	if (addr >= kernel_unwind_table.start &&
 	    addr <= kernel_unwind_table.end)
 		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
-	else
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&unwind_lock, flags);
 		list_for_each_entry(table, &unwind_tables, list) {
 			if (addr >= table->start &&
 			    addr <= table->end)
@@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr)
 				break;
 			}
 		}
+		spin_unlock_irqrestore(&unwind_lock, flags);
+	}
 
 	return e;
 }
@@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 
 			insn = *(unsigned int *)npc;
 
-			if ((insn & 0xffffc000) == 0x37de0000 ||
-			    (insn & 0xffe00000) == 0x6fc00000) {
+			if ((insn & 0xffffc001) == 0x37de0000 ||
+			    (insn & 0xffe00001) == 0x6fc00000) {
 				/* ldo X(sp), sp, or stwm X,D(sp) */
-				frame_size += (insn & 0x1 ? -1 << 13 : 0) |
-					((insn & 0x3fff) >> 1);
+				frame_size += (insn & 0x3fff) >> 1;
 				dbg("analyzing func @ %lx, insn=%08x @ "
 				    "%lx, frame_size = %ld\n", info->ip,
 				    insn, npc, frame_size);
-			} else if ((insn & 0xffe00008) == 0x73c00008) {
+			} else if ((insn & 0xffe00009) == 0x73c00008) {
 				/* std,ma X,D(sp) */
-				frame_size += (insn & 0x1 ? -1 << 13 : 0) |
-					(((insn >> 4) & 0x3ff) << 3);
+				frame_size += ((insn >> 4) & 0x3ff) << 3;
 				dbg("analyzing func @ %lx, insn=%08x @ "
 				    "%lx, frame_size = %ld\n", info->ip,
 				    insn, npc, frame_size);
@@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 			}
 		}
 	}
 
+	if (frame_size > e->Total_frame_size << 3)
+		frame_size = e->Total_frame_size << 3;
+
 	if (!unwind_special(info, e->region_start, frame_size)) {
 		info->prev_sp = info->sp - frame_size;
 		if (e->Millicode)
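
The old code sign-extended the im14 displacement (its sign lives in bit 0), so a stack-shrinking instruction could push frame_size negative. Requiring bit 0 to be clear in the new masks matches only non-negative displacements, which the simplified extraction then reads directly. A small C sketch of the old decoding, built only from the removed lines above, showing what the extra mask bit filters out:

/* sketch of the parisc im14 displacement decoding discussed above; not kernel code */
#include <stdio.h>

static long decode_im14(unsigned int insn)
{
	/* the removed lines: sign from bit 0, magnitude from bits 13..1 */
	return (insn & 0x1 ? -(1L << 13) : 0) | ((insn & 0x3fff) >> 1);
}

int main(void)
{
	unsigned int grow   = 0x37de0080;	/* ldo X(sp),sp pattern, displacement +64 */
	unsigned int shrink = 0x37de0081;	/* bit 0 set: negative displacement */

	printf("%ld\n", decode_im14(grow));	/* 64: still matched and counted   */
	printf("%ld\n", decode_im14(shrink));	/* -8128: now rejected by the mask */
	return 0;
}
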
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c1e82e968506..a0948f40bc7b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -717,7 +717,7 @@
 #define MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
 #define MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
 #define SPRN_MMCR1	798
-#define SPRN_MMCR2	769
+#define SPRN_MMCR2	785
 #define SPRN_MMCRA	0x312
 #define MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
 #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -754,13 +754,13 @@
 #define SPRN_PMC6	792
 #define SPRN_PMC7	793
 #define SPRN_PMC8	794
-#define SPRN_SIAR	780
-#define SPRN_SDAR	781
 #define SPRN_SIER	784
 #define   SIER_SIPR		0x2000000	/* Sampled MSR_PR */
 #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
 #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
 #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
+#define SPRN_SIAR	796
+#define SPRN_SDAR	797
 #define SPRN_TACR	888
 #define SPRN_TCSCR	889
 #define SPRN_CSIGR	890
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index da5192590c44..ccd2037c797f 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = {
 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
 	W(0xffff0000), W(0x003f0000),	/* POWER7 */
 	W(0xffff0000), W(0x004b0000),	/* POWER8E */
+	W(0xffff0000), W(0x004c0000),	/* POWER8NVL */
 	W(0xffff0000), W(0x004d0000),	/* POWER8 */
 	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 59268969a0bc..b2740c67e172 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
 	},
 };
 
+/*
+ * 'R' and 'C' update notes:
+ *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
+ *    create writeable HPTEs without C set, because the hcall H_PROTECT
+ *    that we use in that case will not update C
+ *  - The above is however not a problem, because we also don't do that
+ *    fancy "no flush" variant of eviction and we use H_REMOVE which will
+ *    do the right thing and thus we don't have the race I described earlier
+ *
+ *  - Under bare metal, we do have the race, so we need R and C set
+ *  - We make sure R is always set and never lost
+ *  - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
+ */
 unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
 	unsigned long rflags = 0;
@@ -186,9 +199,14 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 		rflags |= 0x1;
 	}
 	/*
-	 * Always add "C" bit for perf. Memory coherence is always enabled
+	 * We can't allow hardware to update hpte bits. Hence always
+	 * set 'R' bit and set 'C' if it is a write fault
+	 * Memory coherence is always enabled
 	 */
-	rflags |= HPTE_R_C | HPTE_R_M;
+	rflags |= HPTE_R_R | HPTE_R_M;
+
+	if (pteflags & _PAGE_DIRTY)
+		rflags |= HPTE_R_C;
 	/*
 	 * Add in WIG bits
 	 */
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index eb4451144746..670318766545 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
 		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-		/*
-		 * Since we are not supporting SW TLB systems, we don't
-		 * have any thing similar to flush_tlb_page_nohash()
-		 */
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 18b2c11604fa..c939e6e57a9e 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -296,11 +296,6 @@ found:
 void __init radix__early_init_mmu(void)
 {
 	unsigned long lpcr;
-	/*
-	 * setup LPCR UPRT based on mmu_features
-	 */
-	lpcr = mfspr(SPRN_LPCR);
-	mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 
 #ifdef CONFIG_PPC_64K_PAGES
 	/* PAGE_SIZE mappings */
@@ -343,8 +338,11 @@ void __init radix__early_init_mmu(void)
 	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
 	radix_init_page_sizes();
-	if (!firmware_has_feature(FW_FEATURE_LPAR))
+	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+		lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 		radix_init_partition_table();
+	}
 
 	radix_init_pgtable();
 }
@@ -353,16 +351,15 @@ void radix__early_init_mmu_secondary(void)
 {
 	unsigned long lpcr;
 	/*
-	 * setup LPCR UPRT based on mmu_features
+	 * update partition table control register and UPRT
 	 */
-	lpcr = mfspr(SPRN_LPCR);
-	mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
-	/*
-	 * update partition table control register, 64 K size.
-	 */
-	if (!firmware_has_feature(FW_FEATURE_LPAR))
+	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+		lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
+
 		mtspr(SPRN_PTCR,
 		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+	}
 }
 
 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index ac3ffd97e059..3998e0f9a03b 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2;
 static int ibm_slot_error_detail;
 static int ibm_get_config_addr_info;
 static int ibm_get_config_addr_info2;
-static int ibm_configure_bridge;
 static int ibm_configure_pe;
 
 /*
@@ -81,7 +80,14 @@ static int pseries_eeh_init(void)
 	ibm_get_config_addr_info2	= rtas_token("ibm,get-config-addr-info2");
 	ibm_get_config_addr_info	= rtas_token("ibm,get-config-addr-info");
 	ibm_configure_pe		= rtas_token("ibm,configure-pe");
-	ibm_configure_bridge		= rtas_token("ibm,configure-bridge");
+
+	/*
+	 * ibm,configure-pe and ibm,configure-bridge have the same semantics,
+	 * however ibm,configure-pe can be faster.  If we can't find
+	 * ibm,configure-pe then fall back to using ibm,configure-bridge.
+	 */
+	if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
+		ibm_configure_pe	= rtas_token("ibm,configure-bridge");
 
 	/*
 	 * Necessary sanity check. We needn't check "get-config-addr-info"
@@ -93,8 +99,7 @@ static int pseries_eeh_init(void)
 	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
 	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
 	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
-	    (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
-	     ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
+	    ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
 		pr_info("EEH functionality not supported\n");
 		return -EINVAL;
 	}
@@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
 {
 	int config_addr;
 	int ret;
+	/* Waiting 0.2s maximum before skipping configuration */
+	int max_wait = 200;
 
 	/* Figure out the PE address */
 	config_addr = pe->config_addr;
 	if (pe->addr)
 		config_addr = pe->addr;
 
-	/* Use new configure-pe function, if supported */
-	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+	while (max_wait > 0) {
 		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
 				config_addr, BUID_HI(pe->phb->buid),
 				BUID_LO(pe->phb->buid));
-	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
-		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
-				config_addr, BUID_HI(pe->phb->buid),
-				BUID_LO(pe->phb->buid));
-	} else {
-		return -EFAULT;
-	}
 
-	if (ret)
-		pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
-			__func__, pe->phb->global_number, pe->addr, ret);
+		if (!ret)
+			return ret;
+
+		/*
+		 * If RTAS returns a delay value that's above 100ms, cut it
+		 * down to 100ms in case firmware made a mistake.  For more
+		 * on how these delay values work see rtas_busy_delay_time
+		 */
+		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+		    ret <= RTAS_EXTENDED_DELAY_MAX)
+			ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+		max_wait -= rtas_busy_delay_time(ret);
+
+		if (max_wait < 0)
+			break;
+
+		rtas_busy_delay(ret);
+	}
 
+	pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+		__func__, pe->phb->global_number, pe->addr, ret);
 	return ret;
 }
 
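
The retry loop leans on the usual RTAS busy-status convention, assumed here from the platform spec rather than shown in this diff: an extended-delay status of 9900+n requests a wait of 10^n milliseconds, so RTAS_EXTENDED_DELAY_MIN+2 corresponds to the 100ms cap named in the comment. A stand-alone sketch of that clamping rule:

/* sketch of the extended-delay clamp used above; not kernel code */
#include <stdio.h>

#define RTAS_EXTENDED_DELAY_MIN	9900
#define RTAS_EXTENDED_DELAY_MAX	9905

static unsigned int delay_ms(int status)
{
	unsigned int ms = 1;

	/* assumed convention: status 9900+n encodes a wait of 10^n ms */
	for (int n = status - RTAS_EXTENDED_DELAY_MIN; n > 0; n--)
		ms *= 10;
	return ms;
}

int main(void)
{
	int ret = RTAS_EXTENDED_DELAY_MIN + 4;	/* firmware asks for 10 s */

	if (ret > RTAS_EXTENDED_DELAY_MIN + 2 && ret <= RTAS_EXTENDED_DELAY_MAX)
		ret = RTAS_EXTENDED_DELAY_MIN + 2;	/* cap at 100 ms */

	printf("wait %u ms\n", delay_ms(ret));		/* prints: wait 100 ms */
	return 0;
}
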
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 0ac42cc4f880..d5ec71b2ed02 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
@@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_ZPOOL=m
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
@@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -537,6 +545,8 @@ CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
 CONFIG_READABLE_ASM=y
 CONFIG_UNUSED_SYMBOLS=y
@@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y
 CONFIG_SLUB_STATS=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_VMACACHE=y
 CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_PGFLAGS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_DETECT_HUNG_TASK=y
+CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
@@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y
 CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
@@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index a31dcd56f7c0..f46a35115d2d 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
@@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y
 CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_RBTREE_TEST=m
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
@@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 7b73bf353345..ba0f2a58b8cd 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
@@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
@@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 1719843a55a2..4366a3e3e754 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,5 +1,5 @@
1# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
2CONFIG_NO_HZ=y 2CONFIG_NO_HZ_IDLE=y
3CONFIG_HIGH_RES_TIMERS=y 3CONFIG_HIGH_RES_TIMERS=y
4CONFIG_BLK_DEV_INITRD=y 4CONFIG_BLK_DEV_INITRD=y
5CONFIG_CC_OPTIMIZE_FOR_SIZE=y 5CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
7CONFIG_PARTITION_ADVANCED=y 7CONFIG_PARTITION_ADVANCED=y
8CONFIG_IBM_PARTITION=y 8CONFIG_IBM_PARTITION=y
9CONFIG_DEFAULT_DEADLINE=y 9CONFIG_DEFAULT_DEADLINE=y
10CONFIG_MARCH_Z196=y
11CONFIG_TUNE_ZEC12=y 10CONFIG_TUNE_ZEC12=y
12# CONFIG_COMPAT is not set 11# CONFIG_COMPAT is not set
13CONFIG_NR_CPUS=2 12CONFIG_NR_CPUS=2
@@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y
64# CONFIG_SCHED_DEBUG is not set 63# CONFIG_SCHED_DEBUG is not set
65CONFIG_RCU_CPU_STALL_TIMEOUT=60 64CONFIG_RCU_CPU_STALL_TIMEOUT=60
66# CONFIG_FTRACE is not set 65# CONFIG_FTRACE is not set
67# CONFIG_STRICT_DEVMEM is not set
68# CONFIG_PFAULT is not set 66# CONFIG_PFAULT is not set
69# CONFIG_S390_HYPFS_FS is not set 67# CONFIG_S390_HYPFS_FS is not set
70# CONFIG_VIRTUALIZATION is not set 68# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index e24f2af4c73b..3f571ea89509 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,8 +1,8 @@
1CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y 2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y 3CONFIG_USELIB=y
4CONFIG_AUDIT=y 4CONFIG_AUDIT=y
5CONFIG_NO_HZ=y 5CONFIG_NO_HZ_IDLE=y
6CONFIG_HIGH_RES_TIMERS=y 6CONFIG_HIGH_RES_TIMERS=y
7CONFIG_TASKSTATS=y 7CONFIG_TASKSTATS=y
8CONFIG_TASK_DELAY_ACCT=y 8CONFIG_TASK_DELAY_ACCT=y
@@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
11CONFIG_IKCONFIG=y 11CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
13CONFIG_CGROUPS=y 13CONFIG_CGROUPS=y
14CONFIG_CGROUP_FREEZER=y
15CONFIG_CGROUP_PIDS=y
16CONFIG_CGROUP_DEVICE=y
17CONFIG_CPUSETS=y
18CONFIG_CGROUP_CPUACCT=y
19CONFIG_MEMCG=y 14CONFIG_MEMCG=y
20CONFIG_MEMCG_SWAP=y 15CONFIG_MEMCG_SWAP=y
21CONFIG_MEMCG_KMEM=y 16CONFIG_BLK_CGROUP=y
22CONFIG_CGROUP_HUGETLB=y
23CONFIG_CGROUP_PERF=y
24CONFIG_CGROUP_SCHED=y 17CONFIG_CGROUP_SCHED=y
25CONFIG_RT_GROUP_SCHED=y 18CONFIG_RT_GROUP_SCHED=y
26CONFIG_BLK_CGROUP=y 19CONFIG_CGROUP_PIDS=y
20CONFIG_CGROUP_FREEZER=y
21CONFIG_CGROUP_HUGETLB=y
22CONFIG_CPUSETS=y
23CONFIG_CGROUP_DEVICE=y
24CONFIG_CGROUP_CPUACCT=y
25CONFIG_CGROUP_PERF=y
26CONFIG_CHECKPOINT_RESTORE=y
27CONFIG_NAMESPACES=y 27CONFIG_NAMESPACES=y
28CONFIG_USER_NS=y 28CONFIG_USER_NS=y
29CONFIG_BLK_DEV_INITRD=y 29CONFIG_BLK_DEV_INITRD=y
@@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y
44CONFIG_IBM_PARTITION=y 44CONFIG_IBM_PARTITION=y
45CONFIG_DEFAULT_DEADLINE=y 45CONFIG_DEFAULT_DEADLINE=y
46CONFIG_LIVEPATCH=y 46CONFIG_LIVEPATCH=y
47CONFIG_MARCH_Z196=y
48CONFIG_NR_CPUS=256 47CONFIG_NR_CPUS=256
49CONFIG_NUMA=y 48CONFIG_NUMA=y
50CONFIG_HZ_100=y 49CONFIG_HZ_100=y
@@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y
52CONFIG_MEMORY_HOTREMOVE=y 51CONFIG_MEMORY_HOTREMOVE=y
53CONFIG_KSM=y 52CONFIG_KSM=y
54CONFIG_TRANSPARENT_HUGEPAGE=y 53CONFIG_TRANSPARENT_HUGEPAGE=y
54CONFIG_CLEANCACHE=y
55CONFIG_FRONTSWAP=y
56CONFIG_CMA=y
57CONFIG_ZSWAP=y
58CONFIG_ZBUD=m
59CONFIG_ZSMALLOC=m
60CONFIG_ZSMALLOC_STAT=y
61CONFIG_IDLE_PAGE_TRACKING=y
55CONFIG_CRASH_DUMP=y 62CONFIG_CRASH_DUMP=y
56CONFIG_BINFMT_MISC=m 63CONFIG_BINFMT_MISC=m
57CONFIG_HIBERNATION=y 64CONFIG_HIBERNATION=y
@@ -61,7 +68,6 @@ CONFIG_UNIX=y
61CONFIG_NET_KEY=y 68CONFIG_NET_KEY=y
62CONFIG_INET=y 69CONFIG_INET=y
63CONFIG_IP_MULTICAST=y 70CONFIG_IP_MULTICAST=y
64# CONFIG_INET_LRO is not set
65CONFIG_L2TP=m 71CONFIG_L2TP=m
66CONFIG_L2TP_DEBUGFS=m 72CONFIG_L2TP_DEBUGFS=m
67CONFIG_VLAN_8021Q=y 73CONFIG_VLAN_8021Q=y
@@ -144,6 +150,9 @@ CONFIG_TMPFS=y
144CONFIG_TMPFS_POSIX_ACL=y 150CONFIG_TMPFS_POSIX_ACL=y
145CONFIG_HUGETLBFS=y 151CONFIG_HUGETLBFS=y
146# CONFIG_NETWORK_FILESYSTEMS is not set 152# CONFIG_NETWORK_FILESYSTEMS is not set
153CONFIG_DEBUG_INFO=y
154CONFIG_DEBUG_INFO_DWARF4=y
155CONFIG_GDB_SCRIPTS=y
147CONFIG_UNUSED_SYMBOLS=y 156CONFIG_UNUSED_SYMBOLS=y
148CONFIG_DEBUG_SECTION_MISMATCH=y 157CONFIG_DEBUG_SECTION_MISMATCH=y
149CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y 158CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
@@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y
158CONFIG_DEBUG_LOCKDEP=y 167CONFIG_DEBUG_LOCKDEP=y
159CONFIG_DEBUG_ATOMIC_SLEEP=y 168CONFIG_DEBUG_ATOMIC_SLEEP=y
160CONFIG_DEBUG_LIST=y 169CONFIG_DEBUG_LIST=y
161CONFIG_DEBUG_PI_LIST=y
162CONFIG_DEBUG_SG=y 170CONFIG_DEBUG_SG=y
163CONFIG_DEBUG_NOTIFIERS=y 171CONFIG_DEBUG_NOTIFIERS=y
164CONFIG_RCU_CPU_STALL_TIMEOUT=60 172CONFIG_RCU_CPU_STALL_TIMEOUT=60
165CONFIG_RCU_TRACE=y 173CONFIG_RCU_TRACE=y
166CONFIG_LATENCYTOP=y 174CONFIG_LATENCYTOP=y
167CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y 175CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
168CONFIG_TRACER_SNAPSHOT=y 176CONFIG_SCHED_TRACER=y
177CONFIG_FTRACE_SYSCALLS=y
169CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y 178CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
170CONFIG_STACK_TRACER=y 179CONFIG_STACK_TRACER=y
171CONFIG_BLK_DEV_IO_TRACE=y 180CONFIG_BLK_DEV_IO_TRACE=y
172CONFIG_UPROBE_EVENT=y 181CONFIG_UPROBE_EVENT=y
182CONFIG_FUNCTION_PROFILER=y
183CONFIG_TRACE_ENUM_MAP_FILE=y
173CONFIG_KPROBES_SANITY_TEST=y 184CONFIG_KPROBES_SANITY_TEST=y
174# CONFIG_STRICT_DEVMEM is not set
175CONFIG_S390_PTDUMP=y 185CONFIG_S390_PTDUMP=y
176CONFIG_CRYPTO_CRYPTD=m 186CONFIG_CRYPTO_CRYPTD=m
177CONFIG_CRYPTO_AUTHENC=m 187CONFIG_CRYPTO_AUTHENC=m
@@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m
212CONFIG_CRYPTO_TEA=m 222CONFIG_CRYPTO_TEA=m
213CONFIG_CRYPTO_TWOFISH=m 223CONFIG_CRYPTO_TWOFISH=m
214CONFIG_CRYPTO_DEFLATE=m 224CONFIG_CRYPTO_DEFLATE=m
215CONFIG_CRYPTO_ZLIB=m
216CONFIG_CRYPTO_LZO=m
217CONFIG_CRYPTO_LZ4=m 225CONFIG_CRYPTO_LZ4=m
218CONFIG_CRYPTO_LZ4HC=m 226CONFIG_CRYPTO_LZ4HC=m
219CONFIG_CRYPTO_ANSI_CPRNG=m 227CONFIG_CRYPTO_ANSI_CPRNG=m
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7a3144017301..19288c1b36d3 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
250 250
251 report_user_fault(regs, SIGSEGV, 1); 251 report_user_fault(regs, SIGSEGV, 1);
252 si.si_signo = SIGSEGV; 252 si.si_signo = SIGSEGV;
253 si.si_errno = 0;
253 si.si_code = si_code; 254 si.si_code = si_code;
254 si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); 255 si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
255 force_sig_info(SIGSEGV, &si, current); 256 force_sig_info(SIGSEGV, &si, current);
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f010c93a88b1..fda605dbc1b4 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
37 * | | | 37 * | | |
38 * +---------------+ | 38 * +---------------+ |
39 * | 8 byte skbp | | 39 * | 8 byte skbp | |
40 * R15+170 -> +---------------+ | 40 * R15+176 -> +---------------+ |
41 * | 8 byte hlen | | 41 * | 8 byte hlen | |
42 * R15+168 -> +---------------+ | 42 * R15+168 -> +---------------+ |
43 * | 4 byte align | | 43 * | 4 byte align | |
@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
58#define STK_OFF (STK_SPACE - STK_160_UNUSED) 58#define STK_OFF (STK_SPACE - STK_160_UNUSED)
59#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ 59#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
60#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ 60#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
61#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */ 61#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
62 62
63#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ 63#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
64#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ 64#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
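The move of the skb-pointer slot from R15+170 to R15+176 puts the 8-byte pointer at a naturally aligned offset that no longer overlaps the hlen slot at R15+168. A minimal standalone sketch of that arithmetic — the constants are taken from the layout comment above, the assertions themselves are ours, not kernel code:

#include <assert.h>
#include <stdio.h>

#define STK_OFF_HLEN		168	/* 8-byte hlen slot, per the layout comment */
#define OLD_STK_OFF_SKBP	170	/* unaligned, inside the hlen slot */
#define NEW_STK_OFF_SKBP	176	/* first 8-aligned slot after hlen */

int main(void)
{
	/* An 8-byte store at 170 is misaligned and would also overlap
	 * the hlen slot occupying bytes 168..175. */
	assert(OLD_STK_OFF_SKBP % 8 != 0);
	assert(OLD_STK_OFF_SKBP < STK_OFF_HLEN + 8);

	/* 176 starts right after hlen and is naturally aligned. */
	assert(NEW_STK_OFF_SKBP % 8 == 0);
	assert(NEW_STK_OFF_SKBP == STK_OFF_HLEN + 8);

	puts("skb pointer slot is 8-byte aligned and overlap-free");
	return 0;
}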
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9133b0ec000b..bee281f3163d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -45,7 +45,7 @@ struct bpf_jit {
45 int labels[1]; /* Labels for local jumps */ 45 int labels[1]; /* Labels for local jumps */
46}; 46};
47 47
48#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */ 48#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
49 49
50#define SEEN_SKB 1 /* skb access */ 50#define SEEN_SKB 1 /* skb access */
51#define SEEN_MEM 2 /* use mem[] for temporary storage */ 51#define SEEN_MEM 2 /* use mem[] for temporary storage */
@@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
450 emit_load_skb_data_hlen(jit); 450 emit_load_skb_data_hlen(jit);
451 if (jit->seen & SEEN_SKB_CHANGE) 451 if (jit->seen & SEEN_SKB_CHANGE)
452 /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ 452 /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
453 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 453 EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
454 STK_OFF_SKBP); 454 STK_OFF_SKBP);
455} 455}
456 456
diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
index 10e9dabc4c41..f0700cfeedd7 100644
--- a/arch/sparc/include/asm/head_64.h
+++ b/arch/sparc/include/asm/head_64.h
@@ -15,6 +15,10 @@
15 15
16#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) 16#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
17 17
18#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
19#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
20#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
21
18#define __CHEETAH_ID 0x003e0014 22#define __CHEETAH_ID 0x003e0014
19#define __JALAPENO_ID 0x003e0016 23#define __JALAPENO_ID 0x003e0016
20#define __SERRANO_ID 0x003e0022 24#define __SERRANO_ID 0x003e0022
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
index 71b5a67522ab..781b9f1dbdc2 100644
--- a/arch/sparc/include/asm/ttable.h
+++ b/arch/sparc/include/asm/ttable.h
@@ -589,8 +589,8 @@ user_rtt_fill_64bit: \
589 restored; \ 589 restored; \
590 nop; nop; nop; nop; nop; nop; \ 590 nop; nop; nop; nop; nop; nop; \
591 nop; nop; nop; nop; nop; \ 591 nop; nop; nop; nop; nop; \
592 ba,a,pt %xcc, user_rtt_fill_fixup; \ 592 ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
593 ba,a,pt %xcc, user_rtt_fill_fixup; \ 593 ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
594 ba,a,pt %xcc, user_rtt_fill_fixup; 594 ba,a,pt %xcc, user_rtt_fill_fixup;
595 595
596 596
@@ -652,8 +652,8 @@ user_rtt_fill_32bit: \
652 restored; \ 652 restored; \
653 nop; nop; nop; nop; nop; \ 653 nop; nop; nop; nop; nop; \
654 nop; nop; nop; \ 654 nop; nop; nop; \
655 ba,a,pt %xcc, user_rtt_fill_fixup; \ 655 ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
656 ba,a,pt %xcc, user_rtt_fill_fixup; \ 656 ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
657 ba,a,pt %xcc, user_rtt_fill_fixup; 657 ba,a,pt %xcc, user_rtt_fill_fixup;
658 658
659 659
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 7cf9c6ea3f1f..fdb13327fded 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
21CFLAGS_REMOVE_pcr.o := -pg 21CFLAGS_REMOVE_pcr.o := -pg
22endif 22endif
23 23
24obj-$(CONFIG_SPARC64) += urtt_fill.o
24obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o 25obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
25obj-$(CONFIG_SPARC32) += etrap_32.o 26obj-$(CONFIG_SPARC32) += etrap_32.o
26obj-$(CONFIG_SPARC32) += rtrap_32.o 27obj-$(CONFIG_SPARC32) += rtrap_32.o
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index d08bdaffdbfc..216948ca4382 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -14,10 +14,6 @@
14#include <asm/visasm.h> 14#include <asm/visasm.h>
15#include <asm/processor.h> 15#include <asm/processor.h>
16 16
17#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
18#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
19#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
20
21#ifdef CONFIG_CONTEXT_TRACKING 17#ifdef CONFIG_CONTEXT_TRACKING
22# define SCHEDULE_USER schedule_user 18# define SCHEDULE_USER schedule_user
23#else 19#else
@@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
242 wrpr %g1, %cwp 238 wrpr %g1, %cwp
243 ba,a,pt %xcc, user_rtt_fill_64bit 239 ba,a,pt %xcc, user_rtt_fill_64bit
244 240
245user_rtt_fill_fixup: 241user_rtt_fill_fixup_dax:
246 rdpr %cwp, %g1 242 ba,pt %xcc, user_rtt_fill_fixup_common
247 add %g1, 1, %g1 243 mov 1, %g3
248 wrpr %g1, 0x0, %cwp
249
250 rdpr %wstate, %g2
251 sll %g2, 3, %g2
252 wrpr %g2, 0x0, %wstate
253
254 /* We know %canrestore and %otherwin are both zero. */
255
256 sethi %hi(sparc64_kern_pri_context), %g2
257 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
258 mov PRIMARY_CONTEXT, %g1
259
260661: stxa %g2, [%g1] ASI_DMMU
261 .section .sun4v_1insn_patch, "ax"
262 .word 661b
263 stxa %g2, [%g1] ASI_MMU
264 .previous
265
266 sethi %hi(KERNBASE), %g1
267 flush %g1
268 244
269 or %g4, FAULT_CODE_WINFIXUP, %g4 245user_rtt_fill_fixup_mna:
270 stb %g4, [%g6 + TI_FAULT_CODE] 246 ba,pt %xcc, user_rtt_fill_fixup_common
271 stx %g5, [%g6 + TI_FAULT_ADDR] 247 mov 2, %g3
272 248
273 mov %g6, %l1 249user_rtt_fill_fixup:
274 wrpr %g0, 0x0, %tl 250 ba,pt %xcc, user_rtt_fill_fixup_common
275 251 clr %g3
276661: nop
277 .section .sun4v_1insn_patch, "ax"
278 .word 661b
279 SET_GL(0)
280 .previous
281
282 wrpr %g0, RTRAP_PSTATE, %pstate
283
284 mov %l1, %g6
285 ldx [%g6 + TI_TASK], %g4
286 LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
287 call do_sparc64_fault
288 add %sp, PTREGS_OFF, %o0
289 ba,pt %xcc, rtrap
290 nop
291 252
292user_rtt_pre_restore: 253user_rtt_pre_restore:
293 add %g1, 1, %g1 254 add %g1, 1, %g1
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 3c25241fa5cb..91cc2f4ae4d9 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
138 return 0; 138 return 0;
139} 139}
140 140
141/* Checks if the fp is valid. We always build signal frames which are
142 * 16-byte aligned, therefore we can always enforce that the restore
143 * frame has that property as well.
144 */
145static bool invalid_frame_pointer(void __user *fp, int fplen)
146{
147 if ((((unsigned long) fp) & 15) ||
148 ((unsigned long)fp) > 0x100000000ULL - fplen)
149 return true;
150 return false;
151}
152
141void do_sigreturn32(struct pt_regs *regs) 153void do_sigreturn32(struct pt_regs *regs)
142{ 154{
143 struct signal_frame32 __user *sf; 155 struct signal_frame32 __user *sf;
144 compat_uptr_t fpu_save; 156 compat_uptr_t fpu_save;
145 compat_uptr_t rwin_save; 157 compat_uptr_t rwin_save;
146 unsigned int psr; 158 unsigned int psr, ufp;
147 unsigned int pc, npc; 159 unsigned int pc, npc;
148 sigset_t set; 160 sigset_t set;
149 compat_sigset_t seta; 161 compat_sigset_t seta;
@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
158 sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; 170 sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
159 171
160 /* 1. Make sure we are not getting garbage from the user */ 172 /* 1. Make sure we are not getting garbage from the user */
161 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || 173 if (invalid_frame_pointer(sf, sizeof(*sf)))
162 (((unsigned long) sf) & 3)) 174 goto segv;
175
176 if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
177 goto segv;
178
179 if (ufp & 0x7)
163 goto segv; 180 goto segv;
164 181
165 if (get_user(pc, &sf->info.si_regs.pc) || 182 if (__get_user(pc, &sf->info.si_regs.pc) ||
166 __get_user(npc, &sf->info.si_regs.npc)) 183 __get_user(npc, &sf->info.si_regs.npc))
167 goto segv; 184 goto segv;
168 185
@@ -227,7 +244,7 @@ segv:
227asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) 244asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
228{ 245{
229 struct rt_signal_frame32 __user *sf; 246 struct rt_signal_frame32 __user *sf;
230 unsigned int psr, pc, npc; 247 unsigned int psr, pc, npc, ufp;
231 compat_uptr_t fpu_save; 248 compat_uptr_t fpu_save;
232 compat_uptr_t rwin_save; 249 compat_uptr_t rwin_save;
233 sigset_t set; 250 sigset_t set;
@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
242 sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; 259 sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
243 260
244 /* 1. Make sure we are not getting garbage from the user */ 261 /* 1. Make sure we are not getting garbage from the user */
245 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || 262 if (invalid_frame_pointer(sf, sizeof(*sf)))
246 (((unsigned long) sf) & 3))
247 goto segv; 263 goto segv;
248 264
249 if (get_user(pc, &sf->regs.pc) || 265 if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
266 goto segv;
267
268 if (ufp & 0x7)
269 goto segv;
270
271 if (__get_user(pc, &sf->regs.pc) ||
250 __get_user(npc, &sf->regs.npc)) 272 __get_user(npc, &sf->regs.npc))
251 goto segv; 273 goto segv;
252 274
@@ -307,14 +329,6 @@ segv:
307 force_sig(SIGSEGV, current); 329 force_sig(SIGSEGV, current);
308} 330}
309 331
310/* Checks if the fp is valid */
311static int invalid_frame_pointer(void __user *fp, int fplen)
312{
313 if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
314 return 1;
315 return 0;
316}
317
318static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) 332static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
319{ 333{
320 unsigned long sp; 334 unsigned long sp;
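The new invalid_frame_pointer() enforces two properties at once: 16-byte frame alignment and that the whole frame fits below the 4GB compat address-space limit. A small userspace sketch of the same test; the function body mirrors the hunk above, the sample values are ours:

#include <stdbool.h>
#include <stdio.h>

static bool invalid_frame_pointer(unsigned long fp, int fplen)
{
	if ((fp & 15) || fp > 0x100000000ULL - fplen)
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", invalid_frame_pointer(0x1000, 256));	  /* 0: aligned, in range */
	printf("%d\n", invalid_frame_pointer(0x1008, 256));	  /* 1: not 16-byte aligned */
	printf("%d\n", invalid_frame_pointer(0xffffff10UL, 256)); /* 1: frame runs past 4GB */
	return 0;
}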
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 52aa5e4ce5e7..c3c12efe0bc0 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -60,10 +60,22 @@ struct rt_signal_frame {
60#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) 60#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
61#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) 61#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
62 62
63/* Checks if the fp is valid. We always build signal frames which are
64 * 16-byte aligned, therefore we can always enforce that the restore
65 * frame has that property as well.
66 */
67static inline bool invalid_frame_pointer(void __user *fp, int fplen)
68{
69 if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
70 return true;
71
72 return false;
73}
74
63asmlinkage void do_sigreturn(struct pt_regs *regs) 75asmlinkage void do_sigreturn(struct pt_regs *regs)
64{ 76{
77 unsigned long up_psr, pc, npc, ufp;
65 struct signal_frame __user *sf; 78 struct signal_frame __user *sf;
66 unsigned long up_psr, pc, npc;
67 sigset_t set; 79 sigset_t set;
68 __siginfo_fpu_t __user *fpu_save; 80 __siginfo_fpu_t __user *fpu_save;
69 __siginfo_rwin_t __user *rwin_save; 81 __siginfo_rwin_t __user *rwin_save;
@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
77 sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; 89 sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
78 90
79 /* 1. Make sure we are not getting garbage from the user */ 91 /* 1. Make sure we are not getting garbage from the user */
 80 if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) 92 if (invalid_frame_pointer(sf, sizeof(*sf)))
93 goto segv_and_exit;
94
95 if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
81 goto segv_and_exit; 96 goto segv_and_exit;
82 97
83 if (((unsigned long) sf) & 3) 98 if (ufp & 0x7)
84 goto segv_and_exit; 99 goto segv_and_exit;
85 100
86 err = __get_user(pc, &sf->info.si_regs.pc); 101 err = __get_user(pc, &sf->info.si_regs.pc);
@@ -127,7 +142,7 @@ segv_and_exit:
127asmlinkage void do_rt_sigreturn(struct pt_regs *regs) 142asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
128{ 143{
129 struct rt_signal_frame __user *sf; 144 struct rt_signal_frame __user *sf;
130 unsigned int psr, pc, npc; 145 unsigned int psr, pc, npc, ufp;
131 __siginfo_fpu_t __user *fpu_save; 146 __siginfo_fpu_t __user *fpu_save;
132 __siginfo_rwin_t __user *rwin_save; 147 __siginfo_rwin_t __user *rwin_save;
133 sigset_t set; 148 sigset_t set;
@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
135 150
136 synchronize_user_stack(); 151 synchronize_user_stack();
137 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; 152 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
138 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || 153 if (invalid_frame_pointer(sf, sizeof(*sf)))
139 (((unsigned long) sf) & 0x03)) 154 goto segv;
155
156 if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
157 goto segv;
158
159 if (ufp & 0x7)
140 goto segv; 160 goto segv;
141 161
142 err = __get_user(pc, &sf->regs.pc); 162 err = __get_user(pc, &sf->regs.pc);
@@ -178,15 +198,6 @@ segv:
178 force_sig(SIGSEGV, current); 198 force_sig(SIGSEGV, current);
179} 199}
180 200
181/* Checks if the fp is valid */
182static inline int invalid_frame_pointer(void __user *fp, int fplen)
183{
184 if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
185 return 1;
186
187 return 0;
188}
189
190static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) 201static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
191{ 202{
192 unsigned long sp = regs->u_regs[UREG_FP]; 203 unsigned long sp = regs->u_regs[UREG_FP];
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 39aaec173f66..5ee930c48f4c 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -234,6 +234,17 @@ do_sigsegv:
234 goto out; 234 goto out;
235} 235}
236 236
237/* Checks if the fp is valid. We always build rt signal frames which
238 * are 16-byte aligned, therefore we can always enforce that the
239 * restore frame has that property as well.
240 */
241static bool invalid_frame_pointer(void __user *fp)
242{
243 if (((unsigned long) fp) & 15)
244 return true;
245 return false;
246}
247
237struct rt_signal_frame { 248struct rt_signal_frame {
238 struct sparc_stackf ss; 249 struct sparc_stackf ss;
239 siginfo_t info; 250 siginfo_t info;
@@ -246,8 +257,8 @@ struct rt_signal_frame {
246 257
247void do_rt_sigreturn(struct pt_regs *regs) 258void do_rt_sigreturn(struct pt_regs *regs)
248{ 259{
260 unsigned long tpc, tnpc, tstate, ufp;
249 struct rt_signal_frame __user *sf; 261 struct rt_signal_frame __user *sf;
250 unsigned long tpc, tnpc, tstate;
251 __siginfo_fpu_t __user *fpu_save; 262 __siginfo_fpu_t __user *fpu_save;
252 __siginfo_rwin_t __user *rwin_save; 263 __siginfo_rwin_t __user *rwin_save;
253 sigset_t set; 264 sigset_t set;
@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
261 (regs->u_regs [UREG_FP] + STACK_BIAS); 272 (regs->u_regs [UREG_FP] + STACK_BIAS);
262 273
263 /* 1. Make sure we are not getting garbage from the user */ 274 /* 1. Make sure we are not getting garbage from the user */
264 if (((unsigned long) sf) & 3) 275 if (invalid_frame_pointer(sf))
276 goto segv;
277
278 if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
265 goto segv; 279 goto segv;
266 280
267 err = get_user(tpc, &sf->regs.tpc); 281 if ((ufp + STACK_BIAS) & 0x7)
282 goto segv;
283
284 err = __get_user(tpc, &sf->regs.tpc);
268 err |= __get_user(tnpc, &sf->regs.tnpc); 285 err |= __get_user(tnpc, &sf->regs.tnpc);
269 if (test_thread_flag(TIF_32BIT)) { 286 if (test_thread_flag(TIF_32BIT)) {
270 tpc &= 0xffffffff; 287 tpc &= 0xffffffff;
@@ -308,14 +325,6 @@ segv:
308 force_sig(SIGSEGV, current); 325 force_sig(SIGSEGV, current);
309} 326}
310 327
311/* Checks if the fp is valid */
312static int invalid_frame_pointer(void __user *fp)
313{
314 if (((unsigned long) fp) & 15)
315 return 1;
316 return 0;
317}
318
319static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) 328static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
320{ 329{
321 unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; 330 unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
index 0f6eebe71e6c..e5fe8cef9a69 100644
--- a/arch/sparc/kernel/sigutil_32.c
+++ b/arch/sparc/kernel/sigutil_32.c
@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
48int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) 48int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
49{ 49{
50 int err; 50 int err;
51
52 if (((unsigned long) fpu) & 3)
53 return -EFAULT;
54
51#ifdef CONFIG_SMP 55#ifdef CONFIG_SMP
52 if (test_tsk_thread_flag(current, TIF_USEDFPU)) 56 if (test_tsk_thread_flag(current, TIF_USEDFPU))
53 regs->psr &= ~PSR_EF; 57 regs->psr &= ~PSR_EF;
@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
97 struct thread_info *t = current_thread_info(); 101 struct thread_info *t = current_thread_info();
98 int i, wsaved, err; 102 int i, wsaved, err;
99 103
100 __get_user(wsaved, &rp->wsaved); 104 if (((unsigned long) rp) & 3)
105 return -EFAULT;
106
107 get_user(wsaved, &rp->wsaved);
101 if (wsaved > NSWINS) 108 if (wsaved > NSWINS)
102 return -EFAULT; 109 return -EFAULT;
103 110
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
index 387834a9c56a..36aadcbeac69 100644
--- a/arch/sparc/kernel/sigutil_64.c
+++ b/arch/sparc/kernel/sigutil_64.c
@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
37 unsigned long fprs; 37 unsigned long fprs;
38 int err; 38 int err;
39 39
40 err = __get_user(fprs, &fpu->si_fprs); 40 if (((unsigned long) fpu) & 7)
41 return -EFAULT;
42
43 err = get_user(fprs, &fpu->si_fprs);
41 fprs_write(0); 44 fprs_write(0);
42 regs->tstate &= ~TSTATE_PEF; 45 regs->tstate &= ~TSTATE_PEF;
43 if (fprs & FPRS_DL) 46 if (fprs & FPRS_DL)
@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
72 struct thread_info *t = current_thread_info(); 75 struct thread_info *t = current_thread_info();
73 int i, wsaved, err; 76 int i, wsaved, err;
74 77
75 __get_user(wsaved, &rp->wsaved); 78 if (((unsigned long) rp) & 7)
79 return -EFAULT;
80
81 get_user(wsaved, &rp->wsaved);
76 if (wsaved > NSWINS) 82 if (wsaved > NSWINS)
77 return -EFAULT; 83 return -EFAULT;
78 84
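The sigutil hunks add an explicit alignment test and switch the first read from __get_user() to get_user(): only the checked variant validates the pointer's range, and neither variant validates alignment. A toy model of that ordering — the names are ours and the kernel's range check is only hinted at in a comment:

#include <stdint.h>
#include <stdio.h>

static int fetch_wsaved(const uint64_t *rp, uint64_t *out)
{
	/* The hunk's new test: a misaligned frame is rejected up front. */
	if ((uintptr_t)rp & 7)
		return -14; /* -EFAULT */

	/* get_user() stand-in: in the kernel this first access also
	 * range-checks the user pointer, which __get_user() would skip. */
	*out = *rp;
	return 0;
}

int main(void)
{
	uint64_t buf[2] = { 3, 0 }, val;

	printf("%d\n", fetch_wsaved(&buf[0], &val));		/* 0 */
	printf("%d\n", fetch_wsaved((uint64_t *)((char *)buf + 4),
				    &val));			/* -14, never dereferenced */
	return 0;
}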
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
new file mode 100644
index 000000000000..5604a2b051d4
--- /dev/null
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -0,0 +1,98 @@
1#include <asm/thread_info.h>
2#include <asm/trap_block.h>
3#include <asm/spitfire.h>
4#include <asm/ptrace.h>
5#include <asm/head.h>
6
7 .text
8 .align 8
9 .globl user_rtt_fill_fixup_common
10user_rtt_fill_fixup_common:
11 rdpr %cwp, %g1
12 add %g1, 1, %g1
13 wrpr %g1, 0x0, %cwp
14
15 rdpr %wstate, %g2
16 sll %g2, 3, %g2
17 wrpr %g2, 0x0, %wstate
18
19 /* We know %canrestore and %otherwin are both zero. */
20
21 sethi %hi(sparc64_kern_pri_context), %g2
22 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
23 mov PRIMARY_CONTEXT, %g1
24
25661: stxa %g2, [%g1] ASI_DMMU
26 .section .sun4v_1insn_patch, "ax"
27 .word 661b
28 stxa %g2, [%g1] ASI_MMU
29 .previous
30
31 sethi %hi(KERNBASE), %g1
32 flush %g1
33
34 mov %g4, %l4
35 mov %g5, %l5
36 brnz,pn %g3, 1f
37 mov %g3, %l3
38
39 or %g4, FAULT_CODE_WINFIXUP, %g4
40 stb %g4, [%g6 + TI_FAULT_CODE]
41 stx %g5, [%g6 + TI_FAULT_ADDR]
421:
43 mov %g6, %l1
44 wrpr %g0, 0x0, %tl
45
46661: nop
47 .section .sun4v_1insn_patch, "ax"
48 .word 661b
49 SET_GL(0)
50 .previous
51
52 wrpr %g0, RTRAP_PSTATE, %pstate
53
54 mov %l1, %g6
55 ldx [%g6 + TI_TASK], %g4
56 LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
57
58 brnz,pn %l3, 1f
59 nop
60
61 call do_sparc64_fault
62 add %sp, PTREGS_OFF, %o0
63 ba,pt %xcc, rtrap
64 nop
65
661: cmp %g3, 2
67 bne,pn %xcc, 2f
68 nop
69
70 sethi %hi(tlb_type), %g1
71 lduw [%g1 + %lo(tlb_type)], %g1
72 cmp %g1, 3
73 bne,pt %icc, 1f
74 add %sp, PTREGS_OFF, %o0
75 mov %l4, %o2
76 call sun4v_do_mna
77 mov %l5, %o1
78 ba,a,pt %xcc, rtrap
791: mov %l4, %o1
80 mov %l5, %o2
81 call mem_address_unaligned
82 nop
83 ba,a,pt %xcc, rtrap
84
852: sethi %hi(tlb_type), %g1
86 mov %l4, %o1
87 lduw [%g1 + %lo(tlb_type)], %g1
88 mov %l5, %o2
89 cmp %g1, 3
90 bne,pt %icc, 1f
91 add %sp, PTREGS_OFF, %o0
92 call sun4v_data_access_exception
93 nop
94 ba,a,pt %xcc, rtrap
95
961: call spitfire_data_access_exception
97 nop
98 ba,a,pt %xcc, rtrap
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 652683cb4b4b..14bb0d5ed3c6 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2824,9 +2824,10 @@ void hugetlb_setup(struct pt_regs *regs)
2824 * the Data-TLB for huge pages. 2824 * the Data-TLB for huge pages.
2825 */ 2825 */
2826 if (tlb_type == cheetah_plus) { 2826 if (tlb_type == cheetah_plus) {
2827 bool need_context_reload = false;
2827 unsigned long ctx; 2828 unsigned long ctx;
2828 2829
2829 spin_lock(&ctx_alloc_lock); 2830 spin_lock_irq(&ctx_alloc_lock);
2830 ctx = mm->context.sparc64_ctx_val; 2831 ctx = mm->context.sparc64_ctx_val;
2831 ctx &= ~CTX_PGSZ_MASK; 2832 ctx &= ~CTX_PGSZ_MASK;
2832 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; 2833 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2845,9 +2846,12 @@ void hugetlb_setup(struct pt_regs *regs)
2845 * also executing in this address space. 2846 * also executing in this address space.
2846 */ 2847 */
2847 mm->context.sparc64_ctx_val = ctx; 2848 mm->context.sparc64_ctx_val = ctx;
2848 on_each_cpu(context_reload, mm, 0); 2849 need_context_reload = true;
2849 } 2850 }
2850 spin_unlock(&ctx_alloc_lock); 2851 spin_unlock_irq(&ctx_alloc_lock);
2852
2853 if (need_context_reload)
2854 on_each_cpu(context_reload, mm, 0);
2851 } 2855 }
2852} 2856}
2853#endif 2857#endif
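The hugetlb_setup() change records the decision under the (now IRQ-disabled) lock and issues the cross-CPU call only after unlocking: on_each_cpu() can wait on other CPUs and must not run with the spinlock held and interrupts off. A userspace pthread analogue of the pattern, all names ours:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static int ctx_value;

static void broadcast_reload(int ctx)
{
	/* Stand-in for on_each_cpu(): may block on other threads. */
	printf("reloading context %d everywhere\n", ctx);
}

static void hugetlb_setup_model(bool need_new_ctx)
{
	bool need_reload = false;
	int ctx;

	pthread_mutex_lock(&ctx_lock);
	if (need_new_ctx) {
		ctx_value++;		/* update under the lock */
		need_reload = true;	/* only note the follow-up work */
	}
	ctx = ctx_value;
	pthread_mutex_unlock(&ctx_lock);

	if (need_reload)		/* act after the lock is dropped */
		broadcast_reload(ctx);
}

int main(void)
{
	hugetlb_setup_model(true);
	return 0;
}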
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index bca14c899137..757390eb562b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -223,36 +223,19 @@ static void __init intel_remapping_check(int num, int slot, int func)
223 * despite the efforts of the "RAM buffer" approach, which simply rounds 223 * despite the efforts of the "RAM buffer" approach, which simply rounds
224 * memory boundaries up to 64M to try to catch space that may decode 224 * memory boundaries up to 64M to try to catch space that may decode
225 * as RAM and so is not suitable for MMIO. 225 * as RAM and so is not suitable for MMIO.
226 *
227 * And yes, so far on current devices the base addr is always under 4G.
228 */ 226 */
229static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
230{
231 u32 base;
232
233 /*
234 * For the PCI IDs in this quirk, the stolen base is always
235 * in 0x5c, aka the BDSM register (yes that's really what
236 * it's called).
237 */
238 base = read_pci_config(num, slot, func, 0x5c);
239 base &= ~((1<<20) - 1);
240
241 return base;
242}
243 227
244#define KB(x) ((x) * 1024UL) 228#define KB(x) ((x) * 1024UL)
245#define MB(x) (KB (KB (x))) 229#define MB(x) (KB (KB (x)))
246#define GB(x) (MB (KB (x)))
247 230
248static size_t __init i830_tseg_size(void) 231static size_t __init i830_tseg_size(void)
249{ 232{
250 u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC); 233 u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
251 234
252 if (!(tmp & TSEG_ENABLE)) 235 if (!(esmramc & TSEG_ENABLE))
253 return 0; 236 return 0;
254 237
255 if (tmp & I830_TSEG_SIZE_1M) 238 if (esmramc & I830_TSEG_SIZE_1M)
256 return MB(1); 239 return MB(1);
257 else 240 else
258 return KB(512); 241 return KB(512);
@@ -260,27 +243,26 @@ static size_t __init i830_tseg_size(void)
260 243
261static size_t __init i845_tseg_size(void) 244static size_t __init i845_tseg_size(void)
262{ 245{
263 u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC); 246 u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
247 u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;
264 248
265 if (!(tmp & TSEG_ENABLE)) 249 if (!(esmramc & TSEG_ENABLE))
266 return 0; 250 return 0;
267 251
268 switch (tmp & I845_TSEG_SIZE_MASK) { 252 switch (tseg_size) {
269 case I845_TSEG_SIZE_512K: 253 case I845_TSEG_SIZE_512K: return KB(512);
270 return KB(512); 254 case I845_TSEG_SIZE_1M: return MB(1);
271 case I845_TSEG_SIZE_1M:
272 return MB(1);
273 default: 255 default:
274 WARN_ON(1); 256 WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
275 return 0;
276 } 257 }
258 return 0;
277} 259}
278 260
279static size_t __init i85x_tseg_size(void) 261static size_t __init i85x_tseg_size(void)
280{ 262{
281 u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC); 263 u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
282 264
283 if (!(tmp & TSEG_ENABLE)) 265 if (!(esmramc & TSEG_ENABLE))
284 return 0; 266 return 0;
285 267
286 return MB(1); 268 return MB(1);
@@ -300,285 +282,287 @@ static size_t __init i85x_mem_size(void)
300 * On 830/845/85x the stolen memory base isn't available in any 282 * On 830/845/85x the stolen memory base isn't available in any
301 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size. 283 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
302 */ 284 */
303static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size) 285static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
286 size_t stolen_size)
304{ 287{
305 return i830_mem_size() - i830_tseg_size() - stolen_size; 288 return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
306} 289}
307 290
308static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size) 291static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
292 size_t stolen_size)
309{ 293{
310 return i830_mem_size() - i845_tseg_size() - stolen_size; 294 return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
311} 295}
312 296
313static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size) 297static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
298 size_t stolen_size)
314{ 299{
315 return i85x_mem_size() - i85x_tseg_size() - stolen_size; 300 return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
316} 301}
317 302
318static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size) 303static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
304 size_t stolen_size)
319{ 305{
306 u16 toud;
307
320 /* 308 /*
321 * FIXME is the graphics stolen memory region 309 * FIXME is the graphics stolen memory region
322 * always at TOUD? Ie. is it always the last 310 * always at TOUD? Ie. is it always the last
323 * one to be allocated by the BIOS? 311 * one to be allocated by the BIOS?
324 */ 312 */
325 return read_pci_config_16(0, 0, 0, I865_TOUD) << 16; 313 toud = read_pci_config_16(0, 0, 0, I865_TOUD);
314
315 return (phys_addr_t)toud << 16;
316}
317
318static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
319 size_t stolen_size)
320{
321 u32 bsm;
322
323 /* Almost universally we can find the Graphics Base of Stolen Memory
324 * at register BSM (0x5c) in the igfx configuration space. On a few
325 * (desktop) machines this is also mirrored in the bridge device at
326 * different locations, or in the MCHBAR.
327 */
328 bsm = read_pci_config(num, slot, func, INTEL_BSM);
329
330 return (phys_addr_t)bsm & INTEL_BSM_MASK;
326} 331}
327 332
328static size_t __init i830_stolen_size(int num, int slot, int func) 333static size_t __init i830_stolen_size(int num, int slot, int func)
329{ 334{
330 size_t stolen_size;
331 u16 gmch_ctrl; 335 u16 gmch_ctrl;
336 u16 gms;
332 337
333 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); 338 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
334 339 gms = gmch_ctrl & I830_GMCH_GMS_MASK;
335 switch (gmch_ctrl & I830_GMCH_GMS_MASK) { 340
336 case I830_GMCH_GMS_STOLEN_512: 341 switch (gms) {
337 stolen_size = KB(512); 342 case I830_GMCH_GMS_STOLEN_512: return KB(512);
338 break; 343 case I830_GMCH_GMS_STOLEN_1024: return MB(1);
339 case I830_GMCH_GMS_STOLEN_1024: 344 case I830_GMCH_GMS_STOLEN_8192: return MB(8);
340 stolen_size = MB(1); 345 /* local memory isn't part of the normal address space */
341 break; 346 case I830_GMCH_GMS_LOCAL: return 0;
342 case I830_GMCH_GMS_STOLEN_8192:
343 stolen_size = MB(8);
344 break;
345 case I830_GMCH_GMS_LOCAL:
346 /* local memory isn't part of the normal address space */
347 stolen_size = 0;
348 break;
349 default: 347 default:
350 return 0; 348 WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
351 } 349 }
352 350
353 return stolen_size; 351 return 0;
354} 352}
355 353
356static size_t __init gen3_stolen_size(int num, int slot, int func) 354static size_t __init gen3_stolen_size(int num, int slot, int func)
357{ 355{
358 size_t stolen_size;
359 u16 gmch_ctrl; 356 u16 gmch_ctrl;
357 u16 gms;
360 358
361 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); 359 gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
362 360 gms = gmch_ctrl & I855_GMCH_GMS_MASK;
363 switch (gmch_ctrl & I855_GMCH_GMS_MASK) { 361
364 case I855_GMCH_GMS_STOLEN_1M: 362 switch (gms) {
365 stolen_size = MB(1); 363 case I855_GMCH_GMS_STOLEN_1M: return MB(1);
366 break; 364 case I855_GMCH_GMS_STOLEN_4M: return MB(4);
367 case I855_GMCH_GMS_STOLEN_4M: 365 case I855_GMCH_GMS_STOLEN_8M: return MB(8);
368 stolen_size = MB(4); 366 case I855_GMCH_GMS_STOLEN_16M: return MB(16);
369 break; 367 case I855_GMCH_GMS_STOLEN_32M: return MB(32);
370 case I855_GMCH_GMS_STOLEN_8M: 368 case I915_GMCH_GMS_STOLEN_48M: return MB(48);
371 stolen_size = MB(8); 369 case I915_GMCH_GMS_STOLEN_64M: return MB(64);
372 break; 370 case G33_GMCH_GMS_STOLEN_128M: return MB(128);
373 case I855_GMCH_GMS_STOLEN_16M: 371 case G33_GMCH_GMS_STOLEN_256M: return MB(256);
374 stolen_size = MB(16); 372 case INTEL_GMCH_GMS_STOLEN_96M: return MB(96);
375 break; 373 case INTEL_GMCH_GMS_STOLEN_160M:return MB(160);
376 case I855_GMCH_GMS_STOLEN_32M: 374 case INTEL_GMCH_GMS_STOLEN_224M:return MB(224);
377 stolen_size = MB(32); 375 case INTEL_GMCH_GMS_STOLEN_352M:return MB(352);
378 break;
379 case I915_GMCH_GMS_STOLEN_48M:
380 stolen_size = MB(48);
381 break;
382 case I915_GMCH_GMS_STOLEN_64M:
383 stolen_size = MB(64);
384 break;
385 case G33_GMCH_GMS_STOLEN_128M:
386 stolen_size = MB(128);
387 break;
388 case G33_GMCH_GMS_STOLEN_256M:
389 stolen_size = MB(256);
390 break;
391 case INTEL_GMCH_GMS_STOLEN_96M:
392 stolen_size = MB(96);
393 break;
394 case INTEL_GMCH_GMS_STOLEN_160M:
395 stolen_size = MB(160);
396 break;
397 case INTEL_GMCH_GMS_STOLEN_224M:
398 stolen_size = MB(224);
399 break;
400 case INTEL_GMCH_GMS_STOLEN_352M:
401 stolen_size = MB(352);
402 break;
403 default: 376 default:
404 stolen_size = 0; 377 WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
405 break;
406 } 378 }
407 379
408 return stolen_size; 380 return 0;
409} 381}
410 382
411static size_t __init gen6_stolen_size(int num, int slot, int func) 383static size_t __init gen6_stolen_size(int num, int slot, int func)
412{ 384{
413 u16 gmch_ctrl; 385 u16 gmch_ctrl;
386 u16 gms;
414 387
415 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 388 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
416 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; 389 gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
417 gmch_ctrl &= SNB_GMCH_GMS_MASK;
418 390
419 return gmch_ctrl << 25; /* 32 MB units */ 391 return (size_t)gms * MB(32);
420} 392}
421 393
422static size_t __init gen8_stolen_size(int num, int slot, int func) 394static size_t __init gen8_stolen_size(int num, int slot, int func)
423{ 395{
424 u16 gmch_ctrl; 396 u16 gmch_ctrl;
397 u16 gms;
425 398
426 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 399 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
427 gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; 400 gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
428 gmch_ctrl &= BDW_GMCH_GMS_MASK; 401
429 return gmch_ctrl << 25; /* 32 MB units */ 402 return (size_t)gms * MB(32);
430} 403}
431 404
432static size_t __init chv_stolen_size(int num, int slot, int func) 405static size_t __init chv_stolen_size(int num, int slot, int func)
433{ 406{
434 u16 gmch_ctrl; 407 u16 gmch_ctrl;
408 u16 gms;
435 409
436 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 410 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
437 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; 411 gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
438 gmch_ctrl &= SNB_GMCH_GMS_MASK;
439 412
440 /* 413 /*
441 * 0x0 to 0x10: 32MB increments starting at 0MB 414 * 0x0 to 0x10: 32MB increments starting at 0MB
442 * 0x11 to 0x16: 4MB increments starting at 8MB 415 * 0x11 to 0x16: 4MB increments starting at 8MB
443 * 0x17 to 0x1d: 4MB increments start at 36MB 416 * 0x17 to 0x1d: 4MB increments start at 36MB
444 */ 417 */
445 if (gmch_ctrl < 0x11) 418 if (gms < 0x11)
446 return gmch_ctrl << 25; 419 return (size_t)gms * MB(32);
447 else if (gmch_ctrl < 0x17) 420 else if (gms < 0x17)
448 return (gmch_ctrl - 0x11 + 2) << 22; 421 return (size_t)(gms - 0x11 + 2) * MB(4);
449 else 422 else
450 return (gmch_ctrl - 0x17 + 9) << 22; 423 return (size_t)(gms - 0x17 + 9) * MB(4);
451} 424}
452 425
453struct intel_stolen_funcs {
454 size_t (*size)(int num, int slot, int func);
455 u32 (*base)(int num, int slot, int func, size_t size);
456};
457
458static size_t __init gen9_stolen_size(int num, int slot, int func) 426static size_t __init gen9_stolen_size(int num, int slot, int func)
459{ 427{
460 u16 gmch_ctrl; 428 u16 gmch_ctrl;
429 u16 gms;
461 430
462 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 431 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
463 gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; 432 gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
464 gmch_ctrl &= BDW_GMCH_GMS_MASK;
465 433
466 if (gmch_ctrl < 0xf0) 434 /* 0x0 to 0xef: 32MB increments starting at 0MB */
467 return gmch_ctrl << 25; /* 32 MB units */ 435 /* 0xf0 to 0xfe: 4MB increments starting at 4MB */
436 if (gms < 0xf0)
437 return (size_t)gms * MB(32);
468 else 438 else
469 /* 4MB increments starting at 0xf0 for 4MB */ 439 return (size_t)(gms - 0xf0 + 1) * MB(4);
470 return (gmch_ctrl - 0xf0 + 1) << 22;
471} 440}
472 441
473typedef size_t (*stolen_size_fn)(int num, int slot, int func); 442struct intel_early_ops {
443 size_t (*stolen_size)(int num, int slot, int func);
444 phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
445};
474 446
475static const struct intel_stolen_funcs i830_stolen_funcs __initconst = { 447static const struct intel_early_ops i830_early_ops __initconst = {
476 .base = i830_stolen_base, 448 .stolen_base = i830_stolen_base,
477 .size = i830_stolen_size, 449 .stolen_size = i830_stolen_size,
478}; 450};
479 451
480static const struct intel_stolen_funcs i845_stolen_funcs __initconst = { 452static const struct intel_early_ops i845_early_ops __initconst = {
481 .base = i845_stolen_base, 453 .stolen_base = i845_stolen_base,
482 .size = i830_stolen_size, 454 .stolen_size = i830_stolen_size,
483}; 455};
484 456
485static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = { 457static const struct intel_early_ops i85x_early_ops __initconst = {
486 .base = i85x_stolen_base, 458 .stolen_base = i85x_stolen_base,
487 .size = gen3_stolen_size, 459 .stolen_size = gen3_stolen_size,
488}; 460};
489 461
490static const struct intel_stolen_funcs i865_stolen_funcs __initconst = { 462static const struct intel_early_ops i865_early_ops __initconst = {
491 .base = i865_stolen_base, 463 .stolen_base = i865_stolen_base,
492 .size = gen3_stolen_size, 464 .stolen_size = gen3_stolen_size,
493}; 465};
494 466
495static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = { 467static const struct intel_early_ops gen3_early_ops __initconst = {
496 .base = intel_stolen_base, 468 .stolen_base = gen3_stolen_base,
497 .size = gen3_stolen_size, 469 .stolen_size = gen3_stolen_size,
498}; 470};
499 471
500static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = { 472static const struct intel_early_ops gen6_early_ops __initconst = {
501 .base = intel_stolen_base, 473 .stolen_base = gen3_stolen_base,
502 .size = gen6_stolen_size, 474 .stolen_size = gen6_stolen_size,
503}; 475};
504 476
505static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = { 477static const struct intel_early_ops gen8_early_ops __initconst = {
506 .base = intel_stolen_base, 478 .stolen_base = gen3_stolen_base,
507 .size = gen8_stolen_size, 479 .stolen_size = gen8_stolen_size,
508}; 480};
509 481
510static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = { 482static const struct intel_early_ops gen9_early_ops __initconst = {
511 .base = intel_stolen_base, 483 .stolen_base = gen3_stolen_base,
512 .size = gen9_stolen_size, 484 .stolen_size = gen9_stolen_size,
513}; 485};
514 486
515static const struct intel_stolen_funcs chv_stolen_funcs __initconst = { 487static const struct intel_early_ops chv_early_ops __initconst = {
516 .base = intel_stolen_base, 488 .stolen_base = gen3_stolen_base,
517 .size = chv_stolen_size, 489 .stolen_size = chv_stolen_size,
518}; 490};
519 491
520static const struct pci_device_id intel_stolen_ids[] __initconst = { 492static const struct pci_device_id intel_early_ids[] __initconst = {
521 INTEL_I830_IDS(&i830_stolen_funcs), 493 INTEL_I830_IDS(&i830_early_ops),
522 INTEL_I845G_IDS(&i845_stolen_funcs), 494 INTEL_I845G_IDS(&i845_early_ops),
523 INTEL_I85X_IDS(&i85x_stolen_funcs), 495 INTEL_I85X_IDS(&i85x_early_ops),
524 INTEL_I865G_IDS(&i865_stolen_funcs), 496 INTEL_I865G_IDS(&i865_early_ops),
525 INTEL_I915G_IDS(&gen3_stolen_funcs), 497 INTEL_I915G_IDS(&gen3_early_ops),
526 INTEL_I915GM_IDS(&gen3_stolen_funcs), 498 INTEL_I915GM_IDS(&gen3_early_ops),
527 INTEL_I945G_IDS(&gen3_stolen_funcs), 499 INTEL_I945G_IDS(&gen3_early_ops),
528 INTEL_I945GM_IDS(&gen3_stolen_funcs), 500 INTEL_I945GM_IDS(&gen3_early_ops),
529 INTEL_VLV_M_IDS(&gen6_stolen_funcs), 501 INTEL_VLV_M_IDS(&gen6_early_ops),
530 INTEL_VLV_D_IDS(&gen6_stolen_funcs), 502 INTEL_VLV_D_IDS(&gen6_early_ops),
531 INTEL_PINEVIEW_IDS(&gen3_stolen_funcs), 503 INTEL_PINEVIEW_IDS(&gen3_early_ops),
532 INTEL_I965G_IDS(&gen3_stolen_funcs), 504 INTEL_I965G_IDS(&gen3_early_ops),
533 INTEL_G33_IDS(&gen3_stolen_funcs), 505 INTEL_G33_IDS(&gen3_early_ops),
534 INTEL_I965GM_IDS(&gen3_stolen_funcs), 506 INTEL_I965GM_IDS(&gen3_early_ops),
535 INTEL_GM45_IDS(&gen3_stolen_funcs), 507 INTEL_GM45_IDS(&gen3_early_ops),
536 INTEL_G45_IDS(&gen3_stolen_funcs), 508 INTEL_G45_IDS(&gen3_early_ops),
537 INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs), 509 INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
538 INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs), 510 INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
539 INTEL_SNB_D_IDS(&gen6_stolen_funcs), 511 INTEL_SNB_D_IDS(&gen6_early_ops),
540 INTEL_SNB_M_IDS(&gen6_stolen_funcs), 512 INTEL_SNB_M_IDS(&gen6_early_ops),
541 INTEL_IVB_M_IDS(&gen6_stolen_funcs), 513 INTEL_IVB_M_IDS(&gen6_early_ops),
542 INTEL_IVB_D_IDS(&gen6_stolen_funcs), 514 INTEL_IVB_D_IDS(&gen6_early_ops),
543 INTEL_HSW_D_IDS(&gen6_stolen_funcs), 515 INTEL_HSW_D_IDS(&gen6_early_ops),
544 INTEL_HSW_M_IDS(&gen6_stolen_funcs), 516 INTEL_HSW_M_IDS(&gen6_early_ops),
545 INTEL_BDW_M_IDS(&gen8_stolen_funcs), 517 INTEL_BDW_M_IDS(&gen8_early_ops),
546 INTEL_BDW_D_IDS(&gen8_stolen_funcs), 518 INTEL_BDW_D_IDS(&gen8_early_ops),
547 INTEL_CHV_IDS(&chv_stolen_funcs), 519 INTEL_CHV_IDS(&chv_early_ops),
548 INTEL_SKL_IDS(&gen9_stolen_funcs), 520 INTEL_SKL_IDS(&gen9_early_ops),
549 INTEL_BXT_IDS(&gen9_stolen_funcs), 521 INTEL_BXT_IDS(&gen9_early_ops),
550 INTEL_KBL_IDS(&gen9_stolen_funcs), 522 INTEL_KBL_IDS(&gen9_early_ops),
551}; 523};
552 524
553static void __init intel_graphics_stolen(int num, int slot, int func) 525static void __init
526intel_graphics_stolen(int num, int slot, int func,
527 const struct intel_early_ops *early_ops)
554{ 528{
529 phys_addr_t base, end;
555 size_t size; 530 size_t size;
531
532 size = early_ops->stolen_size(num, slot, func);
533 base = early_ops->stolen_base(num, slot, func, size);
534
535 if (!size || !base)
536 return;
537
538 end = base + size - 1;
539 printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
540 &base, &end);
541
542 /* Mark this space as reserved */
543 e820_add_region(base, size, E820_RESERVED);
544 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
545}
546
547static void __init intel_graphics_quirks(int num, int slot, int func)
548{
549 const struct intel_early_ops *early_ops;
550 u16 device;
556 int i; 551 int i;
557 u32 start;
558 u16 device, subvendor, subdevice;
559 552
560 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); 553 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
561 subvendor = read_pci_config_16(num, slot, func, 554
562 PCI_SUBSYSTEM_VENDOR_ID); 555 for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
563 subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID); 556 kernel_ulong_t driver_data = intel_early_ids[i].driver_data;
564 557
565 for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) { 558 if (intel_early_ids[i].device != device)
566 if (intel_stolen_ids[i].device == device) { 559 continue;
567 const struct intel_stolen_funcs *stolen_funcs = 560
568 (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data; 561 early_ops = (typeof(early_ops))driver_data;
569 size = stolen_funcs->size(num, slot, func); 562
570 start = stolen_funcs->base(num, slot, func, size); 563 intel_graphics_stolen(num, slot, func, early_ops);
571 if (size && start) { 564
572 printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n", 565 return;
573 start, start + (u32)size - 1);
574 /* Mark this space as reserved */
575 e820_add_region(start, size, E820_RESERVED);
576 sanitize_e820_map(e820.map,
577 ARRAY_SIZE(e820.map),
578 &e820.nr_map);
579 }
580 return;
581 }
582 } 566 }
583} 567}
584 568
@@ -627,7 +611,7 @@ static struct chipset early_qrk[] __initdata = {
627 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, 611 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
628 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 612 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
629 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, 613 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
630 QFLAG_APPLY_ONCE, intel_graphics_stolen }, 614 QFLAG_APPLY_ONCE, intel_graphics_quirks },
631 /* 615 /*
632 * HPET on the current version of the Baytrail platform has accuracy 616 * HPET on the current version of the Baytrail platform has accuracy
633 * problems: it will halt in deep idle state - so we disable it. 617 * problems: it will halt in deep idle state - so we disable it.
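For reference, the CHV decode rules quoted in the hunk above (32MB steps up to 0x10, 4MB steps from 8MB for 0x11-0x16, 4MB steps from 36MB for 0x17-0x1d) can be exercised directly. This sketch mirrors chv_stolen_size(); the sample values are ours:

#include <stddef.h>
#include <stdio.h>

#define MB(x) ((size_t)(x) * 1024 * 1024)

static size_t chv_stolen_size(unsigned int gms)
{
	if (gms < 0x11)
		return (size_t)gms * MB(32);
	else if (gms < 0x17)
		return (size_t)(gms - 0x11 + 2) * MB(4);
	else
		return (size_t)(gms - 0x17 + 9) * MB(4);
}

int main(void)
{
	/* 0x02 -> 64MB, 0x11 -> 8MB, 0x17 -> 36MB */
	unsigned int samples[] = { 0x02, 0x11, 0x17 };
	for (size_t i = 0; i < 3; i++)
		printf("gms=0x%02x -> %zu MB\n", samples[i],
		       chv_stolen_size(samples[i]) / MB(1));
	return 0;
}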
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 769af907f824..7597b42a8a88 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
181 struct kvm_cpuid_entry __user *entries) 181 struct kvm_cpuid_entry __user *entries)
182{ 182{
183 int r, i; 183 int r, i;
184 struct kvm_cpuid_entry *cpuid_entries; 184 struct kvm_cpuid_entry *cpuid_entries = NULL;
185 185
186 r = -E2BIG; 186 r = -E2BIG;
187 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) 187 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
188 goto out; 188 goto out;
189 r = -ENOMEM; 189 r = -ENOMEM;
190 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent); 190 if (cpuid->nent) {
191 if (!cpuid_entries) 191 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
192 goto out; 192 cpuid->nent);
193 r = -EFAULT; 193 if (!cpuid_entries)
194 if (copy_from_user(cpuid_entries, entries, 194 goto out;
195 cpuid->nent * sizeof(struct kvm_cpuid_entry))) 195 r = -EFAULT;
196 goto out_free; 196 if (copy_from_user(cpuid_entries, entries,
197 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
198 goto out;
199 }
197 for (i = 0; i < cpuid->nent; i++) { 200 for (i = 0; i < cpuid->nent; i++) {
198 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; 201 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
199 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; 202 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
212 kvm_x86_ops->cpuid_update(vcpu); 215 kvm_x86_ops->cpuid_update(vcpu);
213 r = kvm_update_cpuid(vcpu); 216 r = kvm_update_cpuid(vcpu);
214 217
215out_free:
216 vfree(cpuid_entries);
217out: 218out:
219 vfree(cpuid_entries);
218 return r; 220 return r;
219} 221}
220 222
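The kvm_vcpu_ioctl_set_cpuid() fix allocates only for a non-zero entry count and funnels every exit through a single vfree(), which is safe because vfree(NULL) is a no-op. A standalone sketch of the pattern, with malloc/free standing in for vmalloc/vfree and our own names:

#include <stdlib.h>
#include <string.h>

struct entry { int function, eax, ebx, ecx, edx; };

static int set_entries(const struct entry *src, unsigned int nent)
{
	struct entry *entries = NULL;
	int r = -12; /* -ENOMEM */

	if (nent) {
		entries = malloc(sizeof(*entries) * nent);
		if (!entries)
			goto out;
		memcpy(entries, src, sizeof(*entries) * nent);
	}

	/* ... consume entries[0..nent) ... */
	r = 0;
out:
	free(entries);	/* free(NULL) is a no-op, like vfree(NULL) */
	return r;
}

int main(void)
{
	struct entry e = { 1, 0, 0, 0, 0 };

	/* nent == 0 no longer fails with -ENOMEM: nothing is allocated. */
	return set_entries(&e, 1) || set_entries(NULL, 0);
}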
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24e800116ab4..def97b3a392b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
336#ifdef CONFIG_X86_64 336#ifdef CONFIG_X86_64
337static void __set_spte(u64 *sptep, u64 spte) 337static void __set_spte(u64 *sptep, u64 spte)
338{ 338{
339 *sptep = spte; 339 WRITE_ONCE(*sptep, spte);
340} 340}
341 341
342static void __update_clear_spte_fast(u64 *sptep, u64 spte) 342static void __update_clear_spte_fast(u64 *sptep, u64 spte)
343{ 343{
344 *sptep = spte; 344 WRITE_ONCE(*sptep, spte);
345} 345}
346 346
347static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) 347static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
390 */ 390 */
391 smp_wmb(); 391 smp_wmb();
392 392
393 ssptep->spte_low = sspte.spte_low; 393 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
394} 394}
395 395
396static void __update_clear_spte_fast(u64 *sptep, u64 spte) 396static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
400 ssptep = (union split_spte *)sptep; 400 ssptep = (union split_spte *)sptep;
401 sspte = (union split_spte)spte; 401 sspte = (union split_spte)spte;
402 402
403 ssptep->spte_low = sspte.spte_low; 403 WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
404 404
405 /* 405 /*
406 * If we map the spte from present to nonpresent, we should clear 406 * If we map the spte from present to nonpresent, we should clear
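The mmu.c hunks wrap SPTE stores in WRITE_ONCE() so the compiler cannot tear or re-issue stores that lockless readers may observe mid-update. A minimal C11 analogue, using relaxed atomics in place of WRITE_ONCE()/READ_ONCE(); the names are ours:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t spte;

static void set_spte(uint64_t val)
{
	/* Single, untorn store visible to concurrent readers. */
	atomic_store_explicit(&spte, val, memory_order_relaxed);
}

static uint64_t read_spte(void)
{
	/* Matching untorn load (READ_ONCE() on the kernel side). */
	return atomic_load_explicit(&spte, memory_order_relaxed);
}

int main(void)
{
	set_spte(0xdeadbeefcafef00dULL);
	printf("%#llx\n", (unsigned long long)read_spte());
	return 0;
}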
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c805cf494154..902d9da12392 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_NB_CFG:
 	case MSR_FAM10H_MMIO_CONF_BASE:
 	case MSR_AMD64_BU_CFG2:
+	case MSR_IA32_PERF_CTL:
 		msr_info->data = 0;
 		break;
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 			      | KVM_VCPUEVENT_VALID_SMM))
 		return -EINVAL;
 
+	if (events->exception.injected &&
+	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+		return -EINVAL;
+
 	process_nmi(vcpu);
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
@@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 	if (dbgregs->flags)
 		return -EINVAL;
 
+	if (dbgregs->dr6 & ~0xffffffffull)
+		return -EINVAL;
+	if (dbgregs->dr7 & ~0xffffffffull)
+		return -EINVAL;
+
 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
 	vcpu->arch.dr6 = dbgregs->dr6;
@@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 
 	slot = id_to_memslot(slots, id);
 	if (size) {
-		if (WARN_ON(slot->npages))
+		if (slot->npages)
 			return -EEXIST;
 
 		/*
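The kvm_vcpu_ioctl hunks above harden userspace-controlled inputs: an injected exception vector must be a legal vector (at most 31 and not NMI_VECTOR), and DR6/DR7 may not have bits 63:32 set, since the debug registers are architecturally 32-bit. A hedged user-space model of the debug-register check (dbgregs_valid is an illustrative name, not a kernel function):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* DR6/DR7 are architecturally 32-bit on x86, so a 64-bit value from
	 * userspace with any of bits 63:32 set is malformed and must be
	 * rejected before it reaches the emulated debug registers. */
	static bool dbgregs_valid(uint64_t dr6, uint64_t dr7)
	{
		return !(dr6 & ~0xffffffffULL) && !(dr7 & ~0xffffffffULL);
	}

	int main(void)
	{
		printf("%d\n", dbgregs_valid(0x0000000000000400ULL, 0)); /* 1: ok */
		printf("%d\n", dbgregs_valid(0x1000000000000000ULL, 0)); /* 0: rejected */
		return 0;
	}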
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index e28e912000a7..331f6baf2df8 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
 	tristate "Asymmetric public-key crypto algorithm subtype"
 	select MPILIB
 	select CRYPTO_HASH_INFO
+	select CRYPTO_AKCIPHER
 	help
 	  This option provides support for asymmetric public key type handling.
 	  If signature generation and/or verification are to be used,
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 0d92d0f915e9..c7ba948d253c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
 
 		pr->pblk = object.processor.pblk_address;
-
-		/*
-		 * We don't care about error returns - we just try to mark
-		 * these reserved so that nobody else is confused into thinking
-		 * that this region might be unused..
-		 *
-		 * (In particular, allocating the IO range for Cardbus)
-		 */
-		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
 	}
 
 	/*
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 3d5b8a099351..c1d138e128cb 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
 }
 
 int acpi_video_get_levels(struct acpi_device *device,
-			  struct acpi_video_device_brightness **dev_br)
+			  struct acpi_video_device_brightness **dev_br,
+			  int *pmax_level)
 {
 	union acpi_object *obj = NULL;
 	int i, max_level = 0, count = 0, level_ac_battery = 0;
@@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device,
 
 	br->count = count;
 	*dev_br = br;
+	if (pmax_level)
+		*pmax_level = max_level;
 
 out:
 	kfree(obj);
@@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
 	struct acpi_video_device_brightness *br = NULL;
 	int result = -EINVAL;
 
-	result = acpi_video_get_levels(device->dev, &br);
+	result = acpi_video_get_levels(device->dev, &br, &max_level);
 	if (result)
 		return result;
 	device->brightness = br;
@@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
 
 	mutex_lock(&video->device_list_lock);
 	list_for_each_entry(dev, &video->video_device_list, entry) {
-		if (!acpi_video_device_lcd_query_levels(dev, &levels))
+		if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
 			kfree(levels);
 	}
 	mutex_unlock(&video->device_list_lock);
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 0f18dbc9a37f..daceb80022b0 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value,
 static u8
 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
 {
-	u64 address;
-
 	if (!reg->access_width) {
+		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+			max_bit_width = 32;
+		}
+
 		/*
 		 * Detect old register descriptors where only the bit_width field
-		 * makes senses. The target address is copied to handle possible
-		 * alignment issues.
+		 * makes senses.
 		 */
-		ACPI_MOVE_64_TO_64(&address, &reg->address);
-		if (!reg->bit_offset && reg->bit_width &&
+		if (reg->bit_width < max_bit_width &&
+		    !reg->bit_offset && reg->bit_width &&
 		    ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
-		    ACPI_IS_ALIGNED(reg->bit_width, 8) &&
-		    ACPI_IS_ALIGNED(address, reg->bit_width)) {
+		    ACPI_IS_ALIGNED(reg->bit_width, 8)) {
 			return (reg->bit_width);
-		} else {
-			if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
-				return (32);
-			} else {
-				return (max_bit_width);
-			}
 		}
+		return (max_bit_width);
 	} else {
 		return (1 << (reg->access_width + 2));
 	}
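With an explicit access_width in the GAS register descriptor, the function above computes the width as 1 << (access_width + 2), mapping the ACPI encoding 1/2/3/4 (byte/word/dword/qword) to 8/16/32/64 bits. A small stand-alone check of that arithmetic:

	#include <stdio.h>

	/* ACPI GAS access_size encoding: 1 = byte, 2 = word, 3 = dword,
	 * 4 = qword; the bit width is 1 << (access_width + 2). */
	int main(void)
	{
		for (unsigned aw = 1; aw <= 4; aw++)
			printf("access_width %u -> %u bits\n", aw, 1u << (aw + 2));
		return 0;
	}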
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index f170d746336d..c72e64893d03 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
+	/*
+	 * We don't care about error returns - we just try to mark
+	 * these reserved so that nobody else is confused into thinking
+	 * that this region might be unused..
+	 *
+	 * (In particular, allocating the IO range for Cardbus)
+	 */
+	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
 	pr->throttling.state = 0;
 
 	duty_mask = pr->throttling.state_count - 1;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index a969a7e443be..85aaf2222587 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -181,13 +181,17 @@ static char *res_strings[] = {
181 "reserved 27", 181 "reserved 27",
182 "reserved 28", 182 "reserved 28",
183 "reserved 29", 183 "reserved 29",
184 "reserved 30", 184 "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
185 "reassembly abort: no buffers", 185 "reassembly abort: no buffers",
186 "receive buffer overflow", 186 "receive buffer overflow",
187 "change in GFC", 187 "change in GFC",
188 "receive buffer full", 188 "receive buffer full",
189 "low priority discard - no receive descriptor", 189 "low priority discard - no receive descriptor",
190 "low priority discard - missing end of packet", 190 "low priority discard - missing end of packet",
191 "reserved 37",
192 "reserved 38",
193 "reserved 39",
194 "reseverd 40",
191 "reserved 41", 195 "reserved 41",
192 "reserved 42", 196 "reserved 42",
193 "reserved 43", 197 "reserved 43",
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7d00f2994738..809dd1e02091 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
 	/* make the ptr point to the corresponding buffer desc entry */
 	buf_desc_ptr += desc;
 	if (!desc || (desc > iadev->num_rx_desc) ||
-	    ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
+	    ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
 		free_desc(dev, desc);
 		IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
 		return -1;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 36bc11a106aa..9009295f5134 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq)
 {
-	clamp_val(target_freq, policy->min, policy->max);
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
 
 	return cpufreq_driver->fast_switch(policy, target_freq);
 }
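The cpufreq fix above is a classic discarded-return-value bug: clamp_val() yields the clamped value rather than modifying its argument, so the unassigned call was a no-op and out-of-range frequencies reached the driver. A compilable demonstration with a simplified clamp_val macro (the kernel's real macro is type-checked and differs in detail):

	#include <stdio.h>

	/* Simplified clamp_val(): returns the clamped value, it does NOT
	 * modify its argument in place. */
	#define clamp_val(val, lo, hi) \
		((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

	int main(void)
	{
		unsigned int target = 5000000, min = 800000, max = 3400000;

		clamp_val(target, min, max);          /* buggy: result discarded */
		printf("discarded: %u\n", target);    /* still 5000000 */

		target = clamp_val(target, min, max); /* fixed: assign the result */
		printf("assigned:  %u\n", target);    /* now 3400000 */
		return 0;
	}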
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3a9c4325d6e2..0d159b513469 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -449,7 +449,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 		cpu->acpi_perf_data.states[0].core_frequency =
 			policy->cpuinfo.max_freq / 1000;
 		cpu->valid_pss_table = true;
-		pr_info("_PPC limits will be enforced\n");
+		pr_debug("_PPC limits will be enforced\n");
 
 		return;
 
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 52c7395cb8d8..0d0d4529ee36 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 	unsigned int unit;
+	u32 unit_size;
 	int ret;
 
 	if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
 	if (!req->info)
 		return -EINVAL;
 
-	for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
-		if (!(req->nbytes & (unit_size_map[unit].size - 1)))
-			break;
+	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+	if (req->nbytes <= unit_size_map[0].size) {
+		for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+			if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+				unit_size = unit_size_map[unit].value;
+				break;
+			}
+		}
+	}
 
-	if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+	if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
 	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
 		/* Use the fallback to process the request for any
 		 * unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
 	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
 	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
 					   : CCP_AES_ACTION_DECRYPT;
-	rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+	rctx->cmd.u.xts.unit_size = unit_size;
 	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
 	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
 	rctx->cmd.u.xts.iv = &rctx->iv_sg;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6eefaa2fe58f..63464e86f2b1 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1986,7 +1986,7 @@ err_algs:
1986 &dd->pdata->algs_info[i].algs_list[j]); 1986 &dd->pdata->algs_info[i].algs_list[j]);
1987err_pm: 1987err_pm:
1988 pm_runtime_disable(dev); 1988 pm_runtime_disable(dev);
1989 if (dd->polling_mode) 1989 if (!dd->polling_mode)
1990 dma_release_channel(dd->dma_lch); 1990 dma_release_channel(dd->dma_lch);
1991data_err: 1991data_err:
1992 dev_err(dev, "initialization failed.\n"); 1992 dev_err(dev, "initialization failed.\n");
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 4a424eca75ed..f353db213a81 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,2 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
 obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a2c07ee6677..6355ab38d630 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -33,6 +33,7 @@
 #include <linux/seq_file.h>
 #include <linux/poll.h>
 #include <linux/reservation.h>
+#include <linux/mm.h>
 
 #include <uapi/linux/dma-buf.h>
 
@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 	dmabuf = file->private_data;
 
 	/* check for overflowing the buffer's size */
-	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+	if (vma->vm_pgoff + vma_pages(vma) >
 	    dmabuf->size >> PAGE_SHIFT)
 		return -EINVAL;
 
@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		return -EINVAL;
 
 	/* check for offset overflow */
-	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+	if (pgoff + vma_pages(vma) < pgoff)
 		return -EOVERFLOW;
 
 	/* check for overflowing the buffer's size */
-	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+	if (pgoff + vma_pages(vma) >
 	    dmabuf->size >> PAGE_SHIFT)
 		return -EINVAL;
 
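vma_pages(), which the dma-buf patch switches to, is simply ((vm_end - vm_start) >> PAGE_SHIFT) packaged as a helper; the overflow checks are unchanged, only more readable. A user-space model under that assumption:

	#include <stdio.h>

	/* Minimal model: vma_pages() is just the mapping length in pages. */
	#define PAGE_SHIFT 12

	struct vm_area_struct { unsigned long vm_start, vm_end, vm_pgoff; };

	static unsigned long vma_pages(const struct vm_area_struct *vma)
	{
		return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	}

	int main(void)
	{
		struct vm_area_struct vma = { .vm_start = 0x10000, .vm_end = 0x15000 };
		printf("pages = %lu\n", vma_pages(&vma)); /* 5 pages of 4 KiB */
		return 0;
	}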
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
new file mode 100644
index 000000000000..a8731c853da6
--- /dev/null
+++ b/drivers/dma-buf/fence-array.c
@@ -0,0 +1,144 @@
+/*
+ * fence-array: aggregate fences to be waited together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Gustavo Padovan <gustavo@padovan.org>
+ *	Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/fence-array.h>
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
+
+static const char *fence_array_get_driver_name(struct fence *fence)
+{
+	return "fence_array";
+}
+
+static const char *fence_array_get_timeline_name(struct fence *fence)
+{
+	return "unbound";
+}
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+{
+	struct fence_array_cb *array_cb =
+		container_of(cb, struct fence_array_cb, cb);
+	struct fence_array *array = array_cb->array;
+
+	if (atomic_dec_and_test(&array->num_pending))
+		fence_signal(&array->base);
+	fence_put(&array->base);
+}
+
+static bool fence_array_enable_signaling(struct fence *fence)
+{
+	struct fence_array *array = to_fence_array(fence);
+	struct fence_array_cb *cb = (void *)(&array[1]);
+	unsigned i;
+
+	for (i = 0; i < array->num_fences; ++i) {
+		cb[i].array = array;
+		/*
+		 * As we may report that the fence is signaled before all
+		 * callbacks are complete, we need to take an additional
+		 * reference count on the array so that we do not free it too
+		 * early. The core fence handling will only hold the reference
+		 * until we signal the array as complete (but that is now
+		 * insufficient).
+		 */
+		fence_get(&array->base);
+		if (fence_add_callback(array->fences[i], &cb[i].cb,
+				       fence_array_cb_func)) {
+			fence_put(&array->base);
+			if (atomic_dec_and_test(&array->num_pending))
+				return false;
+		}
+	}
+
+	return true;
+}
+
+static bool fence_array_signaled(struct fence *fence)
+{
+	struct fence_array *array = to_fence_array(fence);
+
+	return atomic_read(&array->num_pending) <= 0;
+}
+
+static void fence_array_release(struct fence *fence)
+{
+	struct fence_array *array = to_fence_array(fence);
+	unsigned i;
+
+	for (i = 0; i < array->num_fences; ++i)
+		fence_put(array->fences[i]);
+
+	kfree(array->fences);
+	fence_free(fence);
+}
+
+const struct fence_ops fence_array_ops = {
+	.get_driver_name = fence_array_get_driver_name,
+	.get_timeline_name = fence_array_get_timeline_name,
+	.enable_signaling = fence_array_enable_signaling,
+	.signaled = fence_array_signaled,
+	.wait = fence_default_wait,
+	.release = fence_array_release,
+};
+
+/**
+ * fence_array_create - Create a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ * @fences: [in] array containing the fences
+ * @context: [in] fence context to use
+ * @seqno: [in] sequence number to use
+ * @signal_on_any: [in] signal on any fence in the array
+ *
+ * Allocate a fence_array object and initialize the base fence with fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences size
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is taken and fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+				       u64 context, unsigned seqno,
+				       bool signal_on_any)
+{
+	struct fence_array *array;
+	size_t size = sizeof(*array);
+
+	/* Allocate the callback structures behind the array. */
+	size += num_fences * sizeof(struct fence_array_cb);
+	array = kzalloc(size, GFP_KERNEL);
+	if (!array)
+		return NULL;
+
+	spin_lock_init(&array->lock);
+	fence_init(&array->base, &fence_array_ops, &array->lock,
+		   context, seqno);
+
+	array->num_fences = num_fences;
+	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+	array->fences = fences;
+
+	return array;
+}
+EXPORT_SYMBOL(fence_array_create);
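A plausible caller of the new API, as a sketch only (kernel context assumed; combine_two_fences and its arguments are illustrative, not part of the patch). Ownership of the kmalloc'd fences[] array passes to the fence_array, which fence_put()s each entry and kfree()s the array in fence_array_release():

	#include <linux/fence.h>
	#include <linux/fence-array.h>
	#include <linux/slab.h>

	/* Build one fence that signals only once both inputs have signaled. */
	static struct fence *combine_two_fences(struct fence *a, struct fence *b,
						u64 my_ctx, unsigned seqno)
	{
		struct fence_array *array;
		struct fence **fences;

		fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
		if (!fences)
			return NULL;

		fences[0] = fence_get(a);
		fences[1] = fence_get(b);

		/* signal_on_any == false: wait for all fences in the array */
		array = fence_array_create(2, fences, my_ctx, seqno, false);
		if (!array) {
			fence_put(a);
			fence_put(b);
			kfree(fences);
			return NULL;
		}
		return &array->base;
	}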
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7b05dbe9b296..4d51f9e83fa8 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
 
 /**
  * fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
  * This function will return the first index of the number of fences allocated.
  * The fence context is used for setting fence->context to a unique number.
  */
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
 {
 	BUG_ON(!num);
-	return atomic_add_return(num, &fence_context_counter) - num;
+	return atomic64_add_return(num, &fence_context_counter) - num;
 }
 EXPORT_SYMBOL(fence_context_alloc);
 
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
  */
 void
 fence_init(struct fence *fence, const struct fence_ops *ops,
-	   spinlock_t *lock, unsigned context, unsigned seqno)
+	   spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
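Widening the context counter to atomic64_t makes context IDs effectively non-reusable; the old 32-bit counter could wrap on a long-running system and hand two timelines the same context. A user-space model of the allocator using C11 atomics (equivalent in effect to the kernel's atomic64_add_return(num) - num):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static _Atomic uint64_t fence_context_counter;

	/* Reserve num consecutive context IDs; return the first one. */
	static uint64_t fence_context_alloc(unsigned num)
	{
		return atomic_fetch_add(&fence_context_counter, num);
	}

	int main(void)
	{
		uint64_t first = fence_context_alloc(4);
		printf("contexts %llu..%llu\n",
		       (unsigned long long)first, (unsigned long long)first + 3);
		return 0;
	}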
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c0bd5722c997..9566a62ad8e3 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -35,6 +35,17 @@
 #include <linux/reservation.h>
 #include <linux/export.h>
 
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer. A reservation object
+ * can have attached one exclusive fence (normally associated with
+ * write operations) or N shared fences (read operations). The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
 
 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence(). Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
  */
 int reservation_object_reserve_shared(struct reservation_object *obj)
 {
@@ -180,7 +199,11 @@ done:
 	fence_put(old_fence);
 }
 
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
  * Add a fence to a shared slot, obj->lock must be held, and
  * reservation_object_reserve_shared_fence has been called.
  */
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
 
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the exclusive fence to add
+ *
+ * Add a fence to the exclusive slot. The obj->lock must be held.
+ */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
 				       struct fence *fence)
 {
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without update side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				      struct fence **pfence_excl,
 				      unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
 
+/**
+ * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 					 bool wait_all, bool intr,
 					 unsigned long timeout)
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
 	return ret;
 }
 
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
 bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 					  bool test_all)
 {
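The new kernel-doc above pins down the tri-state return convention of reservation_object_wait_timeout_rcu(); a sketch of a conforming caller (kernel context assumed; wait_for_buffer_idle is an illustrative name):

	#include <linux/reservation.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	/* Wait up to 100 ms for all fences on the buffer to signal. */
	static int wait_for_buffer_idle(struct reservation_object *resv)
	{
		long ret;

		ret = reservation_object_wait_timeout_rcu(resv,
							  true,  /* wait_all */
							  true,  /* interruptible */
							  msecs_to_jiffies(100));
		if (ret < 0)
			return ret;          /* e.g. -ERESTARTSYS */
		if (ret == 0)
			return -ETIMEDOUT;   /* wait timed out */
		return 0;                    /* all fences signaled in time */
	}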
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index f08cf2d8309e..9aaa608dfe01 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence)
 
 	sync_file->num_fences = 1;
 	atomic_set(&sync_file->status, 1);
-	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d",
+	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
 		 fence->ops->get_driver_name(fence),
 		 fence->ops->get_timeline_name(fence), fence->context,
 		 fence->seqno);
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index d39014daeef9..fc5f197906ac 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -29,7 +29,6 @@
 
 #include <mach/hardware.h>
 #include <mach/platform.h>
-#include <mach/irqs.h>
 
 #define LPC32XX_GPIO_P3_INP_STATE		_GPREG(0x000)
 #define LPC32XX_GPIO_P3_OUTP_SET		_GPREG(0x004)
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
 
 static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
 {
-	return IRQ_LPC32XX_P0_P1_IRQ;
+	return -ENXIO;
 }
 
-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
-	IRQ_LPC32XX_GPIO_00,
-	IRQ_LPC32XX_GPIO_01,
-	IRQ_LPC32XX_GPIO_02,
-	IRQ_LPC32XX_GPIO_03,
-	IRQ_LPC32XX_GPIO_04,
-	IRQ_LPC32XX_GPIO_05,
-};
-
 static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
 {
-	if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
-		return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
 	return -ENXIO;
 }
 
-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
-	IRQ_LPC32XX_GPI_00,
-	IRQ_LPC32XX_GPI_01,
-	IRQ_LPC32XX_GPI_02,
-	IRQ_LPC32XX_GPI_03,
-	IRQ_LPC32XX_GPI_04,
-	IRQ_LPC32XX_GPI_05,
-	IRQ_LPC32XX_GPI_06,
-	IRQ_LPC32XX_GPI_07,
-	IRQ_LPC32XX_GPI_08,
-	IRQ_LPC32XX_GPI_09,
-	-ENXIO, /* 10 */
-	-ENXIO, /* 11 */
-	-ENXIO, /* 12 */
-	-ENXIO, /* 13 */
-	-ENXIO, /* 14 */
-	-ENXIO, /* 15 */
-	-ENXIO, /* 16 */
-	-ENXIO, /* 17 */
-	-ENXIO, /* 18 */
-	IRQ_LPC32XX_GPI_19,
-	-ENXIO, /* 20 */
-	-ENXIO, /* 21 */
-	-ENXIO, /* 22 */
-	-ENXIO, /* 23 */
-	-ENXIO, /* 24 */
-	-ENXIO, /* 25 */
-	-ENXIO, /* 26 */
-	-ENXIO, /* 27 */
-	IRQ_LPC32XX_GPI_28,
-};
-
 static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
 {
-	if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
-		return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
 	return -ENXIO;
 }
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d407f904a31c..24f60d28f0c0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -20,6 +20,7 @@
 #include <linux/cdev.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 #include <uapi/linux/gpio.h>
 
 #include "gpiolib.h"
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct gpio_device *gdev = filp->private_data;
 	struct gpio_chip *chip = gdev->chip;
-	int __user *ip = (int __user *)arg;
+	void __user *ip = (void __user *)arg;
 
 	/* We fail any subsequent ioctl():s when the chip is gone */
 	if (!chip)
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	return -EINVAL;
 }
 
+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+			      unsigned long arg)
+{
+	return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 /**
  * gpio_chrdev_open() - open the chardev for ioctl operations
  * @inode: inode for this chardev
@@ -431,7 +440,9 @@ static const struct file_operations gpio_fileops = {
 	.owner = THIS_MODULE,
 	.llseek = noop_llseek,
 	.unlocked_ioctl = gpio_ioctl,
-	.compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = gpio_ioctl_compat,
+#endif
 };
 
 static void gpiodevice_release(struct device *dev)
@@ -618,6 +629,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
 		goto err_free_label;
 	}
 
+	spin_unlock_irqrestore(&gpio_lock, flags);
+
 	for (i = 0; i < chip->ngpio; i++) {
 		struct gpio_desc *desc = &gdev->descs[i];
 
@@ -649,8 +662,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
 		}
 	}
 
-	spin_unlock_irqrestore(&gpio_lock, flags);
-
 #ifdef CONFIG_PINCTRL
 	INIT_LIST_HEAD(&gdev->pin_ranges);
 #endif
@@ -1356,10 +1367,13 @@ done:
 /*
  * This descriptor validation needs to be inserted verbatim into each
  * function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication.
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
  */
 #define VALIDATE_DESC(desc) do { \
-	if (!desc || !desc->gdev) { \
+	if (!desc) \
+		return 0; \
+	if (!desc->gdev) { \
 		pr_warn("%s: invalid GPIO\n", __func__); \
 		return -EINVAL; \
 	} \
@@ -1370,7 +1384,9 @@ done:
 	} } while (0)
 
 #define VALIDATE_DESC_VOID(desc) do { \
-	if (!desc || !desc->gdev) { \
+	if (!desc) \
+		return; \
+	if (!desc->gdev) { \
 		pr_warn("%s: invalid GPIO\n", __func__); \
 		return; \
 	} \
@@ -2066,17 +2082,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
  */
 int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
 {
-	if (offset >= chip->ngpio)
-		return -EINVAL;
+	struct gpio_desc *desc;
+
+	desc = gpiochip_get_desc(chip, offset);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	/* Flush direction if something changed behind our back */
+	if (chip->get_direction) {
+		int dir = chip->get_direction(chip, offset);
+
+		if (dir)
+			clear_bit(FLAG_IS_OUT, &desc->flags);
+		else
+			set_bit(FLAG_IS_OUT, &desc->flags);
+	}
 
-	if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
+	if (test_bit(FLAG_IS_OUT, &desc->flags)) {
 		chip_err(chip,
 			 "%s: tried to flag a GPIO set as output for IRQ\n",
 			 __func__);
 		return -EIO;
 	}
 
-	set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+	set_bit(FLAG_USED_AS_IRQ, &desc->flags);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
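The compat_ioctl change above follows the standard pattern for ioctls that pass pointers: a 32-bit process's pointer argument must go through compat_ptr() before the native handler uses it. The same shape for a hypothetical driver (sketch only; my_ioctl and my_fops are illustrative names):

	#include <linux/compat.h>
	#include <linux/fs.h>
	#include <linux/module.h>

	static long my_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	{
		return -EINVAL;	/* placeholder body for the sketch */
	}

	#ifdef CONFIG_COMPAT
	/* Translate the 32-bit user pointer, then reuse the native handler. */
	static long my_ioctl_compat(struct file *filp, unsigned int cmd,
				    unsigned long arg)
	{
		return my_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
	}
	#endif

	static const struct file_operations my_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= my_ioctl,
	#ifdef CONFIG_COMPAT
		.compat_ioctl	= my_ioctl_compat,
	#endif
	};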
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index be43afb08c69..e3dba6f44a79 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 		drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
 		drm_scatter.o drm_pci.o \
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
-		drm_crtc.o drm_modes.o drm_edid.o \
+		drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
 		drm_trace_points.o drm_global.o drm_prime.o \
 		drm_rect.o drm_vma_manager.o drm_flip_work.o \
@@ -23,7 +23,8 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
-		drm_kms_helper_common.o drm_dp_dual_mode_helper.o
+		drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
+		drm_simple_kms_helper.o
 
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 992f00b65be4..da3d02154fa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2032,7 +2032,7 @@ struct amdgpu_device {
 	struct amdgpu_irq_src		hpd_irq;
 
 	/* rings */
-	unsigned			fence_context;
+	u64				fence_context;
 	unsigned			num_rings;
 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
 	bool				ib_pool_ready;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b0832da2ef7e..a6eecf6f9065 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -240,7 +240,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 
 	work->base = base;
 
-	r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
+	r = drm_crtc_vblank_get(crtc);
 	if (r) {
 		DRM_ERROR("failed to get vblank before flip\n");
 		goto pflip_cleanup;
@@ -268,7 +268,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 vblank_cleanup:
-	drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);
+	drm_crtc_vblank_put(crtc);
 
 pflip_cleanup:
 	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8bf84efafb04..b16366c2b4a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -427,7 +427,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 			   soffset, eoffset, eoffset - soffset);
 
 		if (i->fence)
-			seq_printf(m, " protected by 0x%08x on context %d",
+			seq_printf(m, " protected by 0x%08x on context %llu",
 				   i->fence->seqno, i->fence->context);
 
 		seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 8227344d2ff6..c1b04e9aab57 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 	}
 }
 
-static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-				     u16 *blue, uint32_t start, uint32_t size)
+static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				    u16 *blue, uint32_t size)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	int end = (start + size > 256) ? 256 : start + size, i;
+	int i;
 
 	/* userspace palettes are always correct as is */
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		amdgpu_crtc->lut_r[i] = red[i] >> 6;
 		amdgpu_crtc->lut_g[i] = green[i] >> 6;
 		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
 	}
 	dce_v10_0_crtc_load_lut(crtc);
+
+	return 0;
 }
 
 static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
-		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+		drm_crtc_vblank_on(crtc);
 		dce_v10_0_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+		drm_crtc_vblank_off(crtc);
 		if (amdgpu_crtc->enabled) {
 			dce_v10_0_vga_enable(crtc, true);
 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
 
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
 	schedule_work(&works->unpin_work);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index af26ec0bc59d..c90408bc0fde 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2678,19 +2678,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 	}
 }
 
-static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-				     u16 *blue, uint32_t start, uint32_t size)
+static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				    u16 *blue, uint32_t size)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	int end = (start + size > 256) ? 256 : start + size, i;
+	int i;
 
 	/* userspace palettes are always correct as is */
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		amdgpu_crtc->lut_r[i] = red[i] >> 6;
 		amdgpu_crtc->lut_g[i] = green[i] >> 6;
 		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
 	}
 	dce_v11_0_crtc_load_lut(crtc);
+
+	return 0;
 }
 
 static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2728,13 +2730,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
-		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+		drm_crtc_vblank_on(crtc);
 		dce_v11_0_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+		drm_crtc_vblank_off(crtc);
 		if (amdgpu_crtc->enabled) {
 			dce_v11_0_vga_enable(crtc, true);
 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3433,7 +3435,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
 
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
 	schedule_work(&works->unpin_work);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3fb65e41a6ef..300ff4aab0fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2574,19 +2574,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 	}
 }
 
-static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-				    u16 *blue, uint32_t start, uint32_t size)
+static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				   u16 *blue, uint32_t size)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	int end = (start + size > 256) ? 256 : start + size, i;
+	int i;
 
 	/* userspace palettes are always correct as is */
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		amdgpu_crtc->lut_r[i] = red[i] >> 6;
 		amdgpu_crtc->lut_g[i] = green[i] >> 6;
 		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
 	}
 	dce_v8_0_crtc_load_lut(crtc);
+
+	return 0;
 }
 
 static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2624,13 +2626,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
-		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+		drm_crtc_vblank_on(crtc);
 		dce_v8_0_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+		drm_crtc_vblank_off(crtc);
 		if (amdgpu_crtc->enabled) {
 			dce_v8_0_vga_enable(crtc, true);
 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3376,7 +3378,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
 
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+	drm_crtc_vblank_put(&amdgpu_crtc->base);
 	schedule_work(&works->unpin_work);
 
 	return 0;
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
index 86574b698a78..8c01a25d279a 100644
--- a/drivers/gpu/drm/arc/arcpgu.h
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -22,7 +22,6 @@ struct arcpgu_drm_private {
 	struct clk		*clk;
 	struct drm_fbdev_cma	*fbdev;
 	struct drm_framebuffer	*fb;
-	struct list_head	event_list;
 	struct drm_crtc		crtc;
 	struct drm_plane	*plane;
 };
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 92f8beff8e60..ee0a61c2861b 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -145,20 +145,14 @@ static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
 static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
 				      struct drm_crtc_state *state)
 {
-	struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
-	unsigned long flags;
-
-	if (crtc->state->event) {
-		struct drm_pending_vblank_event *event = crtc->state->event;
+	struct drm_pending_vblank_event *event = crtc->state->event;
 
+	if (event) {
 		crtc->state->event = NULL;
-		event->pipe = drm_crtc_index(crtc);
-
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
-		list_add_tail(&event->base.link, &arcpgu->event_list);
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		spin_lock_irq(&crtc->dev->event_lock);
+		drm_crtc_send_vblank_event(crtc, event);
+		spin_unlock_irq(&crtc->dev->event_lock);
 	}
 }
 
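The rewritten atomic_begin above completes the pending vblank event immediately under dev->event_lock via drm_crtc_send_vblank_event(), which is why the driver-private event list, the preclose hook, and the vblank reference juggling can all be deleted in the following hunks. The same minimal pattern for a hypothetical driver (sketch, kernel/DRM context assumed):

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	/* Complete the event attached to the new CRTC state right away;
	 * no driver-private bookkeeping is needed for it afterwards. */
	static void my_crtc_atomic_begin(struct drm_crtc *crtc,
					 struct drm_crtc_state *old_state)
	{
		struct drm_pending_vblank_event *event = crtc->state->event;

		if (event) {
			crtc->state->event = NULL;

			spin_lock_irq(&crtc->dev->event_lock);
			drm_crtc_send_vblank_event(crtc, event);
			spin_unlock_irq(&crtc->dev->event_lock);
		}
	}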
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 76e187a5bde0..a92e533531c3 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -32,17 +32,11 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
32 drm_fbdev_cma_hotplug_event(arcpgu->fbdev); 32 drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
33} 33}
34 34
35static int arcpgu_atomic_commit(struct drm_device *dev,
36 struct drm_atomic_state *state, bool async)
37{
38 return drm_atomic_helper_commit(dev, state, false);
39}
40
41static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { 35static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
42 .fb_create = drm_fb_cma_create, 36 .fb_create = drm_fb_cma_create,
43 .output_poll_changed = arcpgu_fb_output_poll_changed, 37 .output_poll_changed = arcpgu_fb_output_poll_changed,
44 .atomic_check = drm_atomic_helper_check, 38 .atomic_check = drm_atomic_helper_check,
45 .atomic_commit = arcpgu_atomic_commit, 39 .atomic_commit = drm_atomic_helper_commit,
46}; 40};
47 41
48static void arcpgu_setup_mode_config(struct drm_device *drm) 42static void arcpgu_setup_mode_config(struct drm_device *drm)
@@ -81,22 +75,6 @@ static const struct file_operations arcpgu_drm_ops = {
81 .mmap = arcpgu_gem_mmap, 75 .mmap = arcpgu_gem_mmap,
82}; 76};
83 77
84static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file)
85{
86 struct arcpgu_drm_private *arcpgu = drm->dev_private;
87 struct drm_pending_vblank_event *e, *t;
88 unsigned long flags;
89
90 spin_lock_irqsave(&drm->event_lock, flags);
91 list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) {
92 if (e->base.file_priv != file)
93 continue;
94 list_del(&e->base.link);
95 e->base.destroy(&e->base);
96 }
97 spin_unlock_irqrestore(&drm->event_lock, flags);
98}
99
100static void arcpgu_lastclose(struct drm_device *drm) 78static void arcpgu_lastclose(struct drm_device *drm)
101{ 79{
102 struct arcpgu_drm_private *arcpgu = drm->dev_private; 80 struct arcpgu_drm_private *arcpgu = drm->dev_private;
@@ -122,8 +100,6 @@ static int arcpgu_load(struct drm_device *drm)
122 if (IS_ERR(arcpgu->clk)) 100 if (IS_ERR(arcpgu->clk))
123 return PTR_ERR(arcpgu->clk); 101 return PTR_ERR(arcpgu->clk);
124 102
125 INIT_LIST_HEAD(&arcpgu->event_list);
126
127 arcpgu_setup_mode_config(drm); 103 arcpgu_setup_mode_config(drm);
128 104
129 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 105 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -192,7 +168,6 @@ int arcpgu_unload(struct drm_device *drm)
192static struct drm_driver arcpgu_drm_driver = { 168static struct drm_driver arcpgu_drm_driver = {
193 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | 169 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
194 DRIVER_ATOMIC, 170 DRIVER_ATOMIC,
195 .preclose = arcpgu_preclose,
196 .lastclose = arcpgu_lastclose, 171 .lastclose = arcpgu_lastclose,
197 .name = "drm-arcpgu", 172 .name = "drm-arcpgu",
198 .desc = "ARC PGU Controller", 173 .desc = "ARC PGU Controller",
@@ -207,7 +182,7 @@ static struct drm_driver arcpgu_drm_driver = {
207 .get_vblank_counter = drm_vblank_no_hw_counter, 182 .get_vblank_counter = drm_vblank_no_hw_counter,
208 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 183 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
209 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 184 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
210 .gem_free_object = drm_gem_cma_free_object, 185 .gem_free_object_unlocked = drm_gem_cma_free_object,
211 .gem_vm_ops = &drm_gem_cma_vm_ops, 186 .gem_vm_ops = &drm_gem_cma_vm_ops,
212 .gem_prime_export = drm_gem_prime_export, 187 .gem_prime_export = drm_gem_prime_export,
213 .gem_prime_import = drm_gem_prime_import, 188 .gem_prime_import = drm_gem_prime_import,
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 08b6baeb320d..b7a8b2ac4055 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -46,23 +46,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
46 return sfuncs->get_modes(&slave->base, connector); 46 return sfuncs->get_modes(&slave->base, connector);
47} 47}
48 48
49struct drm_encoder *
50arcpgu_drm_connector_best_encoder(struct drm_connector *connector)
51{
52 struct drm_encoder_slave *slave;
53 struct arcpgu_drm_connector *con =
54 container_of(connector, struct arcpgu_drm_connector, connector);
55
56 slave = con->encoder_slave;
57 if (slave == NULL) {
58 dev_err(connector->dev->dev,
59 "connector_best_encoder: cannot find slave encoder for connector\n");
60 return NULL;
61 }
62
63 return &slave->base;
64}
65
66static enum drm_connector_status 49static enum drm_connector_status
67arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) 50arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
68{ 51{
@@ -97,7 +80,6 @@ static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
97static const struct drm_connector_helper_funcs 80static const struct drm_connector_helper_funcs
98arcpgu_drm_connector_helper_funcs = { 81arcpgu_drm_connector_helper_funcs = {
99 .get_modes = arcpgu_drm_connector_get_modes, 82 .get_modes = arcpgu_drm_connector_get_modes,
100 .best_encoder = arcpgu_drm_connector_best_encoder,
101}; 83};
102 84
103static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { 85static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index fef1b04c2aab..48019ae22ddb 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -33,8 +33,17 @@
  *
  */
 
+static void hdlcd_crtc_cleanup(struct drm_crtc *crtc)
+{
+	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+	/* stop the controller on cleanup */
+	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+	drm_crtc_cleanup(crtc);
+}
+
 static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
-	.destroy = drm_crtc_cleanup,
+	.destroy = hdlcd_crtc_cleanup,
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
 	.reset = drm_atomic_helper_crtc_reset,
@@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 	struct drm_display_mode *m = &crtc->state->adjusted_mode;
 	struct videomode vm;
-	unsigned int polarities, line_length, err;
+	unsigned int polarities, err;
 
 	vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
 	vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
@@ -113,23 +122,18 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	if (m->flags & DRM_MODE_FLAG_PVSYNC)
 		polarities |= HDLCD_POLARITY_VSYNC;
 
-	line_length = crtc->primary->state->fb->pitches[0];
-
 	/* Allow max number of outstanding requests and largest burst size */
 	hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
 		    HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
 
-	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
-	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
-	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
-	hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
 
 	err = hdlcd_set_pxl_fmt(crtc);
@@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
 	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
 	clk_prepare_enable(hdlcd->clk);
+	hdlcd_crtc_mode_set_nofb(crtc);
 	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
-	drm_crtc_vblank_on(crtc);
 }
 
 static void hdlcd_crtc_disable(struct drm_crtc *crtc)
 {
 	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
-	if (!crtc->primary->fb)
+	if (!crtc->state->active)
 		return;
 
-	clk_disable_unprepare(hdlcd->clk);
 	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
-	drm_crtc_vblank_off(crtc);
+	clk_disable_unprepare(hdlcd->clk);
 }
 
 static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
@@ -179,52 +182,39 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
 static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
 				    struct drm_crtc_state *state)
 {
-	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
-	unsigned long flags;
-
-	if (crtc->state->event) {
-		struct drm_pending_vblank_event *event = crtc->state->event;
+	struct drm_pending_vblank_event *event = crtc->state->event;
 
+	if (event) {
 		crtc->state->event = NULL;
-		event->pipe = drm_crtc_index(crtc);
-
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
-		list_add_tail(&event->base.link, &hdlcd->event_list);
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		spin_lock_irq(&crtc->dev->event_lock);
+		if (drm_crtc_vblank_get(crtc) == 0)
+			drm_crtc_arm_vblank_event(crtc, event);
+		else
+			drm_crtc_send_vblank_event(crtc, event);
+		spin_unlock_irq(&crtc->dev->event_lock);
 	}
 }
 
-static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
-				    struct drm_crtc_state *state)
-{
-}
-
-static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
-	.mode_fixup = hdlcd_crtc_mode_fixup,
-	.mode_set = drm_helper_crtc_mode_set,
-	.mode_set_base = drm_helper_crtc_mode_set_base,
-	.mode_set_nofb = hdlcd_crtc_mode_set_nofb,
 	.enable = hdlcd_crtc_enable,
 	.disable = hdlcd_crtc_disable,
-	.prepare = hdlcd_crtc_disable,
-	.commit = hdlcd_crtc_enable,
 	.atomic_check = hdlcd_crtc_atomic_check,
 	.atomic_begin = hdlcd_crtc_atomic_begin,
-	.atomic_flush = hdlcd_crtc_atomic_flush,
 };
 
 static int hdlcd_plane_atomic_check(struct drm_plane *plane,
 				    struct drm_plane_state *state)
 {
+	u32 src_w, src_h;
+
+	src_w = state->src_w >> 16;
+	src_h = state->src_h >> 16;
+
+	/* we can't do any scaling of the plane source */
+	if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
+		return -EINVAL;
+
 	return 0;
 }
 
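The rewritten hdlcd_crtc_atomic_begin() above replaces the driver-private event list with the core's vblank-event plumbing. The pattern is generic: under event_lock, arm the event for delivery from the next vblank when a reference can be taken, and complete it immediately when vblanks are off (for example while the CRTC is disabled). A sketch of that pattern in isolation (function name hypothetical):

static void sketch_crtc_atomic_begin(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (!event)
		return;
	crtc->state->event = NULL;

	spin_lock_irq(&crtc->dev->event_lock);
	if (drm_crtc_vblank_get(crtc) == 0)
		/* delivered from the vblank irq; the core drops the ref */
		drm_crtc_arm_vblank_event(crtc, event);
	else
		/* vblanks off (e.g. CRTC disabled): complete right away */
		drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irq(&crtc->dev->event_lock);
}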
@@ -233,20 +223,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
 {
 	struct hdlcd_drm_private *hdlcd;
 	struct drm_gem_cma_object *gem;
+	unsigned int depth, bpp;
+	u32 src_w, src_h, dest_w, dest_h;
 	dma_addr_t scanout_start;
 
-	if (!plane->state->crtc || !plane->state->fb)
+	if (!plane->state->fb)
 		return;
 
-	hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
+	drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
+	src_w = plane->state->src_w >> 16;
+	src_h = plane->state->src_h >> 16;
+	dest_w = plane->state->crtc_w;
+	dest_h = plane->state->crtc_h;
 	gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
-	scanout_start = gem->paddr;
+	scanout_start = gem->paddr + plane->state->fb->offsets[0] +
+		plane->state->crtc_y * plane->state->fb->pitches[0] +
+		plane->state->crtc_x * bpp / 8;
+
+	hdlcd = plane->dev->dev_private;
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]);
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
 }
 
 static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
-	.prepare_fb = NULL,
-	.cleanup_fb = NULL,
 	.atomic_check = hdlcd_plane_atomic_check,
 	.atomic_update = hdlcd_plane_atomic_update,
 };
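The new scanout computation in hdlcd_plane_atomic_update() no longer assumes the framebuffer starts at the GEM buffer's physical base: it honours the fb's plane-0 byte offset and the (crtc_x, crtc_y) position. Factored out, the arithmetic looks roughly like this (helper name hypothetical; bpp is bits per pixel as returned by drm_fb_get_bpp_depth()):

/* Sketch of the scanout-address arithmetic used above: start from the GEM
 * buffer, add the fb's plane 0 offset, then move down crtc_y rows and
 * across crtc_x pixels. */
static dma_addr_t sketch_scanout_start(struct drm_plane_state *state,
				       struct drm_gem_cma_object *gem,
				       unsigned int bpp)
{
	struct drm_framebuffer *fb = state->fb;

	return gem->paddr + fb->offsets[0] +
	       state->crtc_y * fb->pitches[0] +
	       state->crtc_x * bpp / 8;
}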
@@ -294,16 +295,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
 	return plane;
 }
 
-void hdlcd_crtc_suspend(struct drm_crtc *crtc)
-{
-	hdlcd_crtc_disable(crtc);
-}
-
-void hdlcd_crtc_resume(struct drm_crtc *crtc)
-{
-	hdlcd_crtc_enable(crtc);
-}
-
 int hdlcd_setup_crtc(struct drm_device *drm)
 {
 	struct hdlcd_drm_private *hdlcd = drm->dev_private;
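The hdlcd_plane_atomic_check() added earlier in this file deserves a note: plane source coordinates are 16.16 fixed point, so shifting right by 16 yields the integer width and height to compare against the CRTC viewport. A generic no-scaling check under the same convention (names hypothetical):

static int sketch_plane_atomic_check(struct drm_plane *plane,
				     struct drm_plane_state *state)
{
	/* src_w/src_h are 16.16 fixed point; >> 16 keeps the integer part */
	u32 src_w = state->src_w >> 16;
	u32 src_h = state->src_h >> 16;

	/* scanout engine cannot scale: source must match the CRTC viewport */
	if (src_w != state->crtc_w || src_h != state->crtc_h)
		return -EINVAL;

	return 0;
}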
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index b987c63ba8d6..74279be20b75 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
 	atomic_set(&hdlcd->dma_end_count, 0);
 #endif
 
-	INIT_LIST_HEAD(&hdlcd->event_list);
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
 	if (IS_ERR(hdlcd->mmio)) {
@@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
 		goto setup_fail;
 	}
 
-	pm_runtime_enable(drm->dev);
-
-	pm_runtime_get_sync(drm->dev);
 	ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
-	pm_runtime_put_sync(drm->dev);
 	if (ret < 0) {
 		DRM_ERROR("failed to install IRQ handler\n");
 		goto irq_fail;
@@ -112,17 +106,11 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
 	drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
 }
 
-static int hdlcd_atomic_commit(struct drm_device *dev,
-			       struct drm_atomic_state *state, bool nonblock)
-{
-	return drm_atomic_helper_commit(dev, state, false);
-}
-
 static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
 	.output_poll_changed = hdlcd_fb_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = hdlcd_atomic_commit,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 static void hdlcd_setup_mode_config(struct drm_device *drm)
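With nonblocking commits handled inside drm_atomic_helper_commit() itself (see the drm_atomic_helper.c changes at the end of this diff), the blocking-only hdlcd_atomic_commit() wrapper becomes dead weight and the helper plugs in directly. The converted table, sketched generically:

/* Sketch: a driver that previously forced blocking commits can now wire
 * the helper in directly; nonblock requests are queued on a worker. */
static const struct drm_mode_config_funcs sketch_mode_config_funcs = {
	.fb_create = drm_fb_cma_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};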
@@ -164,24 +152,9 @@ static irqreturn_t hdlcd_irq(int irq, void *arg)
 	atomic_inc(&hdlcd->vsync_count);
 
 #endif
-	if (irq_status & HDLCD_INTERRUPT_VSYNC) {
-		bool events_sent = false;
-		unsigned long flags;
-		struct drm_pending_vblank_event *e, *t;
-
+	if (irq_status & HDLCD_INTERRUPT_VSYNC)
 		drm_crtc_handle_vblank(&hdlcd->crtc);
 
-		spin_lock_irqsave(&drm->event_lock, flags);
-		list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
-			list_del(&e->base.link);
-			drm_crtc_send_vblank_event(&hdlcd->crtc, e);
-			events_sent = true;
-		}
-		if (events_sent)
-			drm_crtc_vblank_put(&hdlcd->crtc);
-		spin_unlock_irqrestore(&drm->event_lock, flags);
-	}
-
 	/* acknowledge interrupt(s) */
 	hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
 
@@ -275,6 +248,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
 static struct drm_info_list hdlcd_debugfs_list[] = {
 	{ "interrupt_count", hdlcd_show_underrun_count, 0 },
 	{ "clocks", hdlcd_show_pxlclock, 0 },
+	{ "fb", drm_fb_cma_debugfs_show, 0 },
 };
 
 static int hdlcd_debugfs_init(struct drm_minor *minor)
@@ -316,7 +290,7 @@ static struct drm_driver hdlcd_driver = {
 	.get_vblank_counter = drm_vblank_no_hw_counter,
 	.enable_vblank = hdlcd_enable_vblank,
 	.disable_vblank = hdlcd_disable_vblank,
-	.gem_free_object = drm_gem_cma_free_object,
+	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
 	.dumb_create = drm_gem_cma_dumb_create,
 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -357,6 +331,8 @@ static int hdlcd_drm_bind(struct device *dev)
 		return -ENOMEM;
 
 	drm->dev_private = hdlcd;
+	dev_set_drvdata(dev, drm);
+
 	hdlcd_setup_mode_config(drm);
 	ret = hdlcd_load(drm, 0);
 	if (ret)
@@ -366,14 +342,18 @@ static int hdlcd_drm_bind(struct device *dev)
 	if (ret)
 		goto err_unload;
 
-	dev_set_drvdata(dev, drm);
-
 	ret = component_bind_all(dev, drm);
 	if (ret) {
 		DRM_ERROR("Failed to bind all components\n");
 		goto err_unregister;
 	}
 
+	ret = pm_runtime_set_active(dev);
+	if (ret)
+		goto err_pm_active;
+
+	pm_runtime_enable(dev);
+
 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (ret < 0) {
 		DRM_ERROR("failed to initialise vblank\n");
@@ -399,16 +379,16 @@ err_fbdev:
 	drm_mode_config_cleanup(drm);
 	drm_vblank_cleanup(drm);
 err_vblank:
+	pm_runtime_disable(drm->dev);
+err_pm_active:
 	component_unbind_all(dev, drm);
 err_unregister:
 	drm_dev_unregister(drm);
 err_unload:
-	pm_runtime_get_sync(drm->dev);
 	drm_irq_uninstall(drm);
-	pm_runtime_put_sync(drm->dev);
-	pm_runtime_disable(drm->dev);
 	of_reserved_mem_device_release(drm->dev);
 err_free:
+	dev_set_drvdata(dev, NULL);
 	drm_dev_unref(drm);
 
 	return ret;
@@ -495,30 +475,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
 static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct drm_crtc *crtc;
+	struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
 
-	if (pm_runtime_suspended(dev))
+	if (!hdlcd)
 		return 0;
 
-	drm_modeset_lock_all(drm);
-	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-		hdlcd_crtc_suspend(crtc);
-	drm_modeset_unlock_all(drm);
+	drm_kms_helper_poll_disable(drm);
+
+	hdlcd->state = drm_atomic_helper_suspend(drm);
+	if (IS_ERR(hdlcd->state)) {
+		drm_kms_helper_poll_enable(drm);
+		return PTR_ERR(hdlcd->state);
+	}
+
 	return 0;
 }
 
 static int __maybe_unused hdlcd_pm_resume(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct drm_crtc *crtc;
+	struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
 
-	if (!pm_runtime_suspended(dev))
+	if (!hdlcd)
 		return 0;
 
-	drm_modeset_lock_all(drm);
-	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-		hdlcd_crtc_resume(crtc);
-	drm_modeset_unlock_all(drm);
+	drm_atomic_helper_resume(drm, hdlcd->state);
+	drm_kms_helper_poll_enable(drm);
+	pm_runtime_set_active(dev);
+
 	return 0;
 }
 
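The new PM hooks snapshot and restore the full atomic state instead of walking the CRTC list by hand: drm_atomic_helper_suspend() disables all outputs and returns a state object that drm_atomic_helper_resume() replays later. The only driver obligation is stashing that pointer and handling the error path. A sketch of the generic pattern (private struct and names hypothetical):

static int sketch_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct sketch_private *priv = drm->dev_private;	/* hypothetical */

	drm_kms_helper_poll_disable(drm);

	/* disables everything and returns a snapshot to replay on resume */
	priv->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(priv->state)) {
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(priv->state);
	}
	return 0;
}

static int sketch_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct sketch_private *priv = drm->dev_private;

	drm_atomic_helper_resume(drm, priv->state);
	drm_kms_helper_poll_enable(drm);
	return 0;
}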
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index aa234784f053..e3950a071152 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -9,10 +9,9 @@ struct hdlcd_drm_private {
 	void __iomem *mmio;
 	struct clk *clk;
 	struct drm_fbdev_cma *fbdev;
-	struct drm_framebuffer *fb;
-	struct list_head event_list;
 	struct drm_crtc crtc;
 	struct drm_plane *plane;
+	struct drm_atomic_state *state;
 #ifdef CONFIG_DEBUG_FS
 	atomic_t buffer_underrun_count;
 	atomic_t bus_error_count;
@@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg)
 
 int hdlcd_setup_crtc(struct drm_device *dev);
 void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
-void hdlcd_crtc_suspend(struct drm_crtc *crtc);
-void hdlcd_crtc_resume(struct drm_crtc *crtc);
 
 #endif /* __HDLCD_DRV_H__ */
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3130aa8bcdd0..34405e4a5d36 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
 	/* Handle any pending frame work. */
 	if (work) {
 		work->fn(dcrtc, plane, work);
-		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		drm_crtc_vblank_put(&dcrtc->crtc);
 	}
 
 	wake_up(&plane->frame_wait);
@@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
 {
 	int ret;
 
-	ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+	ret = drm_crtc_vblank_get(&dcrtc->crtc);
 	if (ret) {
 		DRM_ERROR("failed to acquire vblank counter\n");
 		return ret;
@@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
 
 	ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
 	if (ret)
-		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		drm_crtc_vblank_put(&dcrtc->crtc);
 
 	return ret;
 }
@@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel(
 	struct armada_plane_work *work = xchg(&plane->work, NULL);
 
 	if (work)
-		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		drm_crtc_vblank_put(&dcrtc->crtc);
 
 	return work;
 }
@@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
 
 	if (fwork->event) {
 		spin_lock_irqsave(&dev->event_lock, flags);
-		drm_send_vblank_event(dev, dcrtc->num, fwork->event);
+		drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
@@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 
 	if (interlaced ^ dcrtc->interlaced) {
 		if (adj->flags & DRM_MODE_FLAG_INTERLACE)
-			drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+			drm_crtc_vblank_get(&dcrtc->crtc);
 		else
-			drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+			drm_crtc_vblank_put(&dcrtc->crtc);
 		dcrtc->interlaced = interlaced;
 	}
 
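All armada hunks above are the same mechanical conversion: the (dev, pipe-index) vblank API is replaced by CRTC-based variants that derive both internally, eliminating a class of mismatched-index bugs. The correspondence, sketched (function name hypothetical):

/* Old (dev, pipe) API                       New CRTC-based API
 * drm_vblank_get(dev, pipe)          ->     drm_crtc_vblank_get(crtc)
 * drm_vblank_put(dev, pipe)          ->     drm_crtc_vblank_put(crtc)
 * drm_send_vblank_event(dev, pipe, e) ->    drm_crtc_send_vblank_event(crtc, e)
 */
static int sketch_hold_vblank(struct drm_crtc *crtc)
{
	int ret = drm_crtc_vblank_get(crtc);	/* was drm_vblank_get(dev, pipe) */

	if (ret)
		return ret;
	/* ... work that needs vblank interrupts enabled ... */
	drm_crtc_vblank_put(crtc);		/* was drm_vblank_put(dev, pipe) */
	return 0;
}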
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 439824a61aa5..cb21c0b6374a 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -197,7 +197,7 @@ static struct drm_driver armada_drm_driver = {
 	.debugfs_init = armada_drm_debugfs_init,
 	.debugfs_cleanup = armada_drm_debugfs_cleanup,
 #endif
-	.gem_free_object = armada_gem_free_object,
+	.gem_free_object_unlocked = armada_gem_free_object,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = armada_gem_prime_export,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index fcd9c0714836..f54afd2113a9 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -209,7 +209,7 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_free_object = ast_gem_free_object,
+	.gem_free_object_unlocked = ast_gem_free_object,
 	.dumb_create = ast_dumb_create,
 	.dumb_map_offset = ast_dumb_mmap_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 5320f8c57884..c017a9330a18 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
 			       struct drm_gem_object **gobj_p)
 {
 	struct drm_device *dev = afbdev->helper.dev;
-	u32 bpp, depth;
 	u32 size;
 	struct drm_gem_object *gobj;
-
 	int ret = 0;
-	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
 	size = mode_cmd->pitches[0] * mode_cmd->height;
 	ret = ast_gem_create(dev, size, true, &gobj);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c337922606e3..5957c3e659fe 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
 
 }
 
-static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-			       u16 *blue, uint32_t start, uint32_t size)
+static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, uint32_t size)
 {
 	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
-	int end = (start + size > 256) ? 256 : start + size, i;
+	int i;
 
 	/* userspace palettes are always correct as is */
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		ast_crtc->lut_r[i] = red[i] >> 8;
 		ast_crtc->lut_g[i] = green[i] >> 8;
 		ast_crtc->lut_b[i] = blue[i] >> 8;
 	}
 	ast_crtc_load_lut(crtc);
+
+	return 0;
 }
 
 
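ast here (and cirrus below) adapt to a core change in the .gamma_set hook: it now returns an int and drops the start offset, since the core always passes a complete LUT of the CRTC's gamma size. The converted shape, sketched with a hypothetical per-entry helper standing in for the driver's LUT write:

/* New prototype: full LUT every time, errors reported instead of ignored */
static int sketch_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t size)
{
	int i;

	for (i = 0; i < size; i++) {
		/* userspace hands in 16-bit entries; truncate or expand to
		 * whatever width the hardware LUT supports (hypothetical) */
		sketch_load_lut_entry(crtc, i, red[i], green[i], blue[i]);
	}
	return 0;
}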
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index cf23a755f777..613f6c99b76a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (crtc->event) {
-		drm_send_vblank_event(dev, crtc->id, crtc->event);
-		drm_vblank_put(dev, crtc->id);
+		drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+		drm_crtc_vblank_put(&crtc->base);
 		crtc->event = NULL;
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -391,12 +391,11 @@ void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
 {
 	struct atmel_hlcdc_crtc_state *state;
 
-	if (crtc->state && crtc->state->mode_blob)
-		drm_property_unreference_blob(crtc->state->mode_blob);
-
 	if (crtc->state) {
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
 		state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
 		kfree(state);
+		crtc->state = NULL;
 	}
 
 	state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -415,8 +414,9 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc)
 		return NULL;
 
 	state = kmalloc(sizeof(*state), GFP_KERNEL);
-	if (state)
-		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+	if (!state)
+		return NULL;
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
 
 	cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
 	state->output_mode = cur->output_mode;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 8ded7645747e..9ecf16c7911d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -519,7 +519,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
 	}
 
 	/* Swap the state, this is the point of no return. */
-	drm_atomic_helper_swap_state(dev, state);
+	drm_atomic_helper_swap_state(state, true);
 
 	if (async)
 		queue_work(dc->wq, &commit->work);
@@ -776,7 +776,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
 	.get_vblank_counter = drm_vblank_no_hw_counter,
 	.enable_vblank = atmel_hlcdc_dc_enable_vblank,
 	.disable_vblank = atmel_hlcdc_dc_disable_vblank,
-	.gem_free_object = drm_gem_cma_free_object,
+	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 39802c0539b6..473a475f27b1 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -113,21 +113,9 @@ static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
 	return atmel_hlcdc_dc_mode_valid(rgb->dc, mode);
 }
 
-
-
-static struct drm_encoder *
-atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
-{
-	struct atmel_hlcdc_rgb_output *rgb =
-			drm_connector_to_atmel_hlcdc_rgb_output(connector);
-
-	return &rgb->encoder;
-}
-
 static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
 	.get_modes = atmel_hlcdc_panel_get_modes,
 	.mode_valid = atmel_hlcdc_rgb_mode_valid,
-	.best_encoder = atmel_hlcdc_rgb_best_encoder,
 };
 
 static enum drm_connector_status
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index b332b4d3b0e2..abace82de6ea 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = {
 	.date = "20130925",
 	.major = 1,
 	.minor = 0,
-	.gem_free_object = bochs_gem_free_object,
+	.gem_free_object_unlocked = bochs_gem_free_object,
 	.dumb_create = bochs_dumb_create,
 	.dumb_map_offset = bochs_dumb_mmap_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index d087b054c360..f9f03bcba0af 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -986,16 +986,8 @@ unlock:
 	return num_modes;
 }
 
-static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector)
-{
-	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
-
-	return anx78xx->bridge.encoder;
-}
-
 static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
 	.get_modes = anx78xx_get_modes,
-	.best_encoder = anx78xx_best_encoder,
 };
 
 static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index c9d941283d30..70b1f7d4270b 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1476,15 +1476,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector,
 	return mode_status;
 }
 
-static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector
-							   *connector)
-{
-	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
-					    connector);
-
-	return hdmi->encoder;
-}
-
 static void dw_hdmi_connector_destroy(struct drm_connector *connector)
 {
 	drm_connector_unregister(connector);
@@ -1525,7 +1516,7 @@ static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
 static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
 	.get_modes = dw_hdmi_connector_get_modes,
 	.mode_valid = dw_hdmi_connector_mode_valid,
-	.best_encoder = dw_hdmi_connector_best_encoder,
+	.best_encoder = drm_atomic_helper_best_encoder,
 };
 
 static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 7ecd59f70b8e..93f3dacf9e27 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -235,16 +235,8 @@ out:
 	return num_modes;
 }
 
-static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
-{
-	struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
-
-	return ptn_bridge->bridge.encoder;
-}
-
 static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
 	.get_modes = ptn3460_get_modes,
-	.best_encoder = ptn3460_best_encoder,
 };
 
 static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index be881e9fef8f..5cd8dd7e5904 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -474,18 +474,8 @@ static int ps8622_get_modes(struct drm_connector *connector)
 	return drm_panel_get_modes(ps8622->panel);
 }
 
-static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
-{
-	struct ps8622_bridge *ps8622;
-
-	ps8622 = connector_to_ps8622(connector);
-
-	return ps8622->bridge.encoder;
-}
-
 static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
 	.get_modes = ps8622_get_modes,
-	.best_encoder = ps8622_best_encoder,
 };
 
 static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index dc83f69da6f1..b05f7eae32ce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -142,7 +142,7 @@ static struct drm_driver driver = {
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
-	.gem_free_object = cirrus_gem_free_object,
+	.gem_free_object_unlocked = cirrus_gem_free_object,
 	.dumb_create = cirrus_dumb_create,
 	.dumb_map_offset = cirrus_dumb_mmap_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index d3d8d7bfcc57..17c915d9a03e 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc)
  * use this for 8-bit mode so can't perform smooth fades on deeper modes,
  * but it's a requirement that we provide the function
  */
-static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-				  u16 *blue, uint32_t start, uint32_t size)
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				 u16 *blue, uint32_t size)
 {
 	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
 	int i;
 
-	if (size != CIRRUS_LUT_SIZE)
-		return;
-
-	for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+	for (i = 0; i < size; i++) {
 		cirrus_crtc->lut_r[i] = red[i];
 		cirrus_crtc->lut_g[i] = green[i];
 		cirrus_crtc->lut_b[i] = blue[i];
 	}
 	cirrus_crtc_load_lut(crtc);
+
+	return 0;
 }
 
 /* Simple cleanup function */
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3ff1ed7b33db..d99ab2f6663f 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -33,6 +33,20 @@
 
 #include "drm_crtc_internal.h"
 
+static void crtc_commit_free(struct kref *kref)
+{
+	struct drm_crtc_commit *commit =
+		container_of(kref, struct drm_crtc_commit, ref);
+
+	kfree(commit);
+}
+
+void drm_crtc_commit_put(struct drm_crtc_commit *commit)
+{
+	kref_put(&commit->ref, crtc_commit_free);
+}
+EXPORT_SYMBOL(drm_crtc_commit_put);
+
 /**
  * drm_atomic_state_default_release -
  * release memory initialized by drm_atomic_state_init
@@ -44,11 +58,8 @@
 void drm_atomic_state_default_release(struct drm_atomic_state *state)
 {
 	kfree(state->connectors);
-	kfree(state->connector_states);
 	kfree(state->crtcs);
-	kfree(state->crtc_states);
 	kfree(state->planes);
-	kfree(state->plane_states);
 }
 EXPORT_SYMBOL(drm_atomic_state_default_release);
 
@@ -72,18 +83,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
 			       sizeof(*state->crtcs), GFP_KERNEL);
 	if (!state->crtcs)
 		goto fail;
-	state->crtc_states = kcalloc(dev->mode_config.num_crtc,
-				     sizeof(*state->crtc_states), GFP_KERNEL);
-	if (!state->crtc_states)
-		goto fail;
 	state->planes = kcalloc(dev->mode_config.num_total_plane,
 				sizeof(*state->planes), GFP_KERNEL);
 	if (!state->planes)
 		goto fail;
-	state->plane_states = kcalloc(dev->mode_config.num_total_plane,
-				      sizeof(*state->plane_states), GFP_KERNEL);
-	if (!state->plane_states)
-		goto fail;
 
 	state->dev = dev;
 
@@ -139,40 +142,48 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
 	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
 
 	for (i = 0; i < state->num_connector; i++) {
-		struct drm_connector *connector = state->connectors[i];
+		struct drm_connector *connector = state->connectors[i].ptr;
 
 		if (!connector)
 			continue;
 
 		connector->funcs->atomic_destroy_state(connector,
-						       state->connector_states[i]);
-		state->connectors[i] = NULL;
-		state->connector_states[i] = NULL;
+						       state->connectors[i].state);
+		state->connectors[i].ptr = NULL;
+		state->connectors[i].state = NULL;
 		drm_connector_unreference(connector);
 	}
 
 	for (i = 0; i < config->num_crtc; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
+		struct drm_crtc *crtc = state->crtcs[i].ptr;
 
 		if (!crtc)
 			continue;
 
 		crtc->funcs->atomic_destroy_state(crtc,
-						  state->crtc_states[i]);
-		state->crtcs[i] = NULL;
-		state->crtc_states[i] = NULL;
+						  state->crtcs[i].state);
+
+		if (state->crtcs[i].commit) {
+			kfree(state->crtcs[i].commit->event);
+			state->crtcs[i].commit->event = NULL;
+			drm_crtc_commit_put(state->crtcs[i].commit);
+		}
+
+		state->crtcs[i].commit = NULL;
+		state->crtcs[i].ptr = NULL;
+		state->crtcs[i].state = NULL;
 	}
 
 	for (i = 0; i < config->num_total_plane; i++) {
-		struct drm_plane *plane = state->planes[i];
+		struct drm_plane *plane = state->planes[i].ptr;
 
 		if (!plane)
 			continue;
 
 		plane->funcs->atomic_destroy_state(plane,
-						   state->plane_states[i]);
-		state->planes[i] = NULL;
-		state->plane_states[i] = NULL;
+						   state->planes[i].state);
+		state->planes[i].ptr = NULL;
+		state->planes[i].state = NULL;
 	}
 }
 EXPORT_SYMBOL(drm_atomic_state_default_clear);
@@ -270,8 +281,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 	if (!crtc_state)
 		return ERR_PTR(-ENOMEM);
 
-	state->crtc_states[index] = crtc_state;
-	state->crtcs[index] = crtc;
+	state->crtcs[index].state = crtc_state;
+	state->crtcs[index].ptr = crtc;
 	crtc_state->state = state;
 
 	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
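The drm_atomic.c rework replaces the pairs of parallel arrays (objects and their states indexed separately) with arrays of small wrapper structs, so a pointer and its state can no longer drift out of sync, and per-CRTC commit tracking gains a natural home. Abridged from this series, the CRTC variant and a lookup look like:

/* Abridged: the object pointer, its new state and the commit tracker
 * travel together in one array slot (see drm_atomic.h in this series). */
struct __drm_crtcs_state {
	struct drm_crtc *ptr;
	struct drm_crtc_state *state;
	struct drm_crtc_commit *commit;
};

/* Both fields are filled together by drm_atomic_get_crtc_state(), so a
 * lookup is a single indexed access: */
static struct drm_crtc_state *
sketch_existing_crtc_state(struct drm_atomic_state *state, int index)
{
	return state->crtcs[index].state;	/* NULL if not yet duplicated */
}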
@@ -351,6 +362,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
 	drm_property_unreference_blob(state->mode_blob);
 	state->mode_blob = NULL;
 
+	memset(&state->mode, 0, sizeof(state->mode));
+
 	if (blob) {
 		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
 		    drm_mode_convert_umode(&state->mode,
@@ -363,7 +376,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
 		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
 				 state->mode.name, state);
 	} else {
-		memset(&state->mode, 0, sizeof(state->mode));
 		state->enable = false;
 		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
 				 state);
@@ -631,8 +643,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
 	if (!plane_state)
 		return ERR_PTR(-ENOMEM);
 
-	state->plane_states[index] = plane_state;
-	state->planes[index] = plane;
+	state->planes[index].state = plane_state;
+	state->planes[index].ptr = plane;
 	plane_state->state = state;
 
 	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
@@ -896,8 +908,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
 	index = drm_connector_index(connector);
 
 	if (index >= state->num_connector) {
-		struct drm_connector **c;
-		struct drm_connector_state **cs;
+		struct __drm_connnectors_state *c;
 		int alloc = max(index + 1, config->num_connector);
 
 		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
@@ -908,26 +919,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
 		memset(&state->connectors[state->num_connector], 0,
 		       sizeof(*state->connectors) * (alloc - state->num_connector));
 
-		cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
-		if (!cs)
-			return ERR_PTR(-ENOMEM);
-
-		state->connector_states = cs;
-		memset(&state->connector_states[state->num_connector], 0,
-		       sizeof(*state->connector_states) * (alloc - state->num_connector));
 		state->num_connector = alloc;
 	}
 
-	if (state->connector_states[index])
-		return state->connector_states[index];
+	if (state->connectors[index].state)
+		return state->connectors[index].state;
 
 	connector_state = connector->funcs->atomic_duplicate_state(connector);
 	if (!connector_state)
 		return ERR_PTR(-ENOMEM);
 
 	drm_connector_reference(connector);
-	state->connector_states[index] = connector_state;
-	state->connectors[index] = connector;
+	state->connectors[index].state = connector_state;
+	state->connectors[index].ptr = connector;
 	connector_state->state = state;
 
 	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
@@ -1431,7 +1435,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
  */
 
 static struct drm_pending_vblank_event *create_vblank_event(
-		struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
+		struct drm_device *dev, struct drm_file *file_priv,
+		struct fence *fence, uint64_t user_data)
 {
 	struct drm_pending_vblank_event *e = NULL;
 	int ret;
@@ -1444,12 +1449,17 @@ static struct drm_pending_vblank_event *create_vblank_event(
 	e->event.base.length = sizeof(e->event);
 	e->event.user_data = user_data;
 
-	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
-	if (ret) {
-		kfree(e);
-		return NULL;
+	if (file_priv) {
+		ret = drm_event_reserve_init(dev, file_priv, &e->base,
					     &e->event.base);
+		if (ret) {
+			kfree(e);
+			return NULL;
+		}
 	}
 
+	e->base.fence = fence;
+
 	return e;
 }
 
@@ -1689,7 +1699,8 @@ retry:
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		struct drm_pending_vblank_event *e;
 
-		e = create_vblank_event(dev, file_priv, arg->user_data);
+		e = create_vblank_event(dev, file_priv, NULL,
+					arg->user_data);
 		if (!e) {
 			ret = -ENOMEM;
 			goto out;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ddfa0d120e39..6a13df8691d4 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -110,8 +110,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
 
 	if (funcs->atomic_best_encoder)
 		new_encoder = funcs->atomic_best_encoder(connector, conn_state);
-	else
+	else if (funcs->best_encoder)
 		new_encoder = funcs->best_encoder(connector);
+	else
+		new_encoder = drm_atomic_helper_best_encoder(connector);
 
 	if (new_encoder) {
 		if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
@@ -298,8 +300,10 @@ update_connector_routing(struct drm_atomic_state *state,
 	if (funcs->atomic_best_encoder)
 		new_encoder = funcs->atomic_best_encoder(connector,
 							 connector_state);
-	else
+	else if (funcs->best_encoder)
 		new_encoder = funcs->best_encoder(connector);
+	else
+		new_encoder = drm_atomic_helper_best_encoder(connector);
 
 	if (!new_encoder) {
 		DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -414,6 +418,9 @@ mode_fixup(struct drm_atomic_state *state)
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		const struct drm_crtc_helper_funcs *funcs;
 
+		if (!crtc_state->enable)
+			continue;
+
 		if (!crtc_state->mode_changed &&
 		    !crtc_state->connectors_changed)
 			continue;
@@ -458,7 +465,7 @@ mode_fixup(struct drm_atomic_state *state)
  * times for the same update, e.g. when the ->atomic_check functions depend upon
  * the adjusted dotclock for fifo space allocation and watermark computation.
  *
- * RETURNS
+ * RETURNS:
  * Zero for success or -errno
  */
 int
@@ -572,7 +579,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
  * It also sets crtc_state->planes_changed to indicate that a crtc has
  * updated planes.
  *
- * RETURNS
+ * RETURNS:
 * Zero for success or -errno
 */
 int
@@ -611,7 +618,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 		if (!funcs || !funcs->atomic_check)
 			continue;
 
-		ret = funcs->atomic_check(crtc, state->crtc_states[i]);
+		ret = funcs->atomic_check(crtc, crtc_state);
 		if (ret) {
 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
 					 crtc->base.id, crtc->name);
@@ -640,7 +647,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
  * ->atomic_check functions depend upon an updated adjusted_mode.clock to
  * e.g. properly compute watermarks.
  *
- * RETURNS
+ * RETURNS:
  * Zero for success or -errno
  */
 int drm_atomic_helper_check(struct drm_device *dev,
@@ -1113,22 +1120,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1113EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); 1120EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1114 1121
1115/** 1122/**
1116 * drm_atomic_helper_commit - commit validated state object 1123 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1117 * @dev: DRM device 1124 * @state: new modeset state to be committed
1118 * @state: the driver state object
1119 * @nonblocking: whether nonblocking behavior is requested.
1120 * 1125 *
1121 * This function commits a with drm_atomic_helper_check() pre-validated state 1126 * This is the default implemenation for the ->atomic_commit_tail() hook of the
1122 * object. This can still fail when e.g. the framebuffer reservation fails. For 1127 * &drm_mode_config_helper_funcs vtable.
1123 * now this doesn't implement nonblocking commits.
1124 * 1128 *
1125 * Note that right now this function does not support nonblocking commits, hence 1129 * Note that the default ordering of how the various stages are called is to
1126 * driver writers must implement their own version for now. Also note that the 1130 * match the legacy modeset helper library closest. One peculiarity of that is
1127 * default ordering of how the various stages are called is to match the legacy 1131 * that it doesn't mesh well with runtime PM at all.
1128 * modeset helper library closest. One peculiarity of that is that it doesn't
1129 * mesh well with runtime PM at all.
1130 * 1132 *
1131 * For drivers supporting runtime PM the recommended sequence is 1133 * For drivers supporting runtime PM the recommended sequence is instead ::
1132 * 1134 *
1133 * drm_atomic_helper_commit_modeset_disables(dev, state); 1135 * drm_atomic_helper_commit_modeset_disables(dev, state);
1134 * 1136 *
@@ -1136,9 +1138,75 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1136 * 1138 *
1137 * drm_atomic_helper_commit_planes(dev, state, true); 1139 * drm_atomic_helper_commit_planes(dev, state, true);
1138 * 1140 *
1139 * See the kerneldoc entries for these three functions for more details. 1141 * for committing the atomic update to hardware. See the kerneldoc entries for
1142 * these three functions for more details.
1143 */
1144void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
1145{
1146 struct drm_device *dev = state->dev;
1147
1148 drm_atomic_helper_commit_modeset_disables(dev, state);
1149
1150 drm_atomic_helper_commit_planes(dev, state, false);
1151
1152 drm_atomic_helper_commit_modeset_enables(dev, state);
1153
1154 drm_atomic_helper_commit_hw_done(state);
1155
1156 drm_atomic_helper_wait_for_vblanks(dev, state);
1157
1158 drm_atomic_helper_cleanup_planes(dev, state);
1159}
1160EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1161
1162static void commit_tail(struct drm_atomic_state *state)
1163{
1164 struct drm_device *dev = state->dev;
1165 struct drm_mode_config_helper_funcs *funcs;
1166
1167 funcs = dev->mode_config.helper_private;
1168
1169 drm_atomic_helper_wait_for_fences(dev, state);
1170
1171 drm_atomic_helper_wait_for_dependencies(state);
1172
1173 if (funcs && funcs->atomic_commit_tail)
1174 funcs->atomic_commit_tail(state);
1175 else
1176 drm_atomic_helper_commit_tail(state);
1177
1178 drm_atomic_helper_commit_cleanup_done(state);
1179
1180 drm_atomic_state_free(state);
1181}
1182
1183static void commit_work(struct work_struct *work)
1184{
1185 struct drm_atomic_state *state = container_of(work,
1186 struct drm_atomic_state,
1187 commit_work);
1188 commit_tail(state);
1189}
1190
1191/**
1192 * drm_atomic_helper_commit - commit validated state object
1193 * @dev: DRM device
1194 * @state: the driver state object
1195 * @nonblock: whether nonblocking behavior is requested.
1196 *
1197 * This function commits a with drm_atomic_helper_check() pre-validated state
1198 * object. This can still fail when e.g. the framebuffer reservation fails. This
1199 * function implements nonblocking commits, using
1200 * drm_atomic_helper_setup_commit() and related functions.
1201 *
1205 * Committing the actual hardware state is done through the
1206 * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable,
1207 * or its default implementation drm_atomic_helper_commit_tail().
1140 * 1208 *
1141 * RETURNS 1209 * RETURNS:
1142 * Zero for success or -errno. 1210 * Zero for success or -errno.
1143 */ 1211 */
1144int drm_atomic_helper_commit(struct drm_device *dev, 1212int drm_atomic_helper_commit(struct drm_device *dev,
@@ -1147,8 +1215,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1147{ 1215{
1148 int ret; 1216 int ret;
1149 1217
1150 if (nonblock) 1218 ret = drm_atomic_helper_setup_commit(state, nonblock);
1151 return -EBUSY; 1219 if (ret)
1220 return ret;
1221
1222 INIT_WORK(&state->commit_work, commit_work);
1152 1223
1153 ret = drm_atomic_helper_prepare_planes(dev, state); 1224 ret = drm_atomic_helper_prepare_planes(dev, state);
1154 if (ret) 1225 if (ret)
@@ -1160,7 +1231,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1160 * the software side now. 1231 * the software side now.
1161 */ 1232 */
1162 1233
1163 drm_atomic_helper_swap_state(dev, state); 1234 drm_atomic_helper_swap_state(state, true);
1164 1235
1165 /* 1236 /*
1166 * Everything below can be run asynchronously without the need to grab 1237 * Everything below can be run asynchronously without the need to grab
@@ -1176,21 +1247,16 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1176 * update. Which is important since compositors need to figure out the 1247 * update. Which is important since compositors need to figure out the
1177 * composition of the next frame right after having submitted the 1248 * composition of the next frame right after having submitted the
1178 * current layout. 1249 * current layout.
1250 *
1251 * NOTE: Commit work has multiple phases, first hardware commit, then
1252 * cleanup. We want them to overlap, hence need system_unbound_wq to
1253 * make sure work items don't artificially stall on one another.
1179 */ 1254 */
1180 1255
1181 drm_atomic_helper_wait_for_fences(dev, state); 1256 if (nonblock)
1182 1257 queue_work(system_unbound_wq, &state->commit_work);
1183 drm_atomic_helper_commit_modeset_disables(dev, state); 1258 else
1184 1259 commit_tail(state);
1185 drm_atomic_helper_commit_planes(dev, state, false);
1186
1187 drm_atomic_helper_commit_modeset_enables(dev, state);
1188
1189 drm_atomic_helper_wait_for_vblanks(dev, state);
1190
1191 drm_atomic_helper_cleanup_planes(dev, state);
1192
1193 drm_atomic_state_free(state);
1194 1260
1195 return 0; 1261 return 0;
1196} 1262}
@@ -1199,12 +1265,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1199/** 1265/**
1200 * DOC: implementing nonblocking commit 1266 * DOC: implementing nonblocking commit
1201 * 1267 *
1202 * For now the atomic helpers don't support nonblocking commit directly. If 1268 * Nonblocking atomic commits have to be implemented in the following sequence:
1203 * there is real need it could be added though, using the dma-buf fence
1204 * infrastructure for generic synchronization with outstanding rendering.
1205 *
1206 * For now drivers have to implement nonblocking commit themselves, with the
1207 * following sequence being the recommended one:
1208 * 1269 *
1209 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function 1270 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
1210 * which commit needs to call which can fail, so we want to run it first and 1271 * which commit needs to call which can fail, so we want to run it first and
@@ -1216,10 +1277,14 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1216 * cancelled updates. Note that it is important to ensure that the framebuffer 1277 * cancelled updates. Note that it is important to ensure that the framebuffer
1217 * cleanup is still done when cancelling. 1278 * cleanup is still done when cancelling.
1218 * 1279 *
1219 * For sufficient parallelism it is recommended to have a work item per crtc 1280 * Asynchronous workers need to have sufficient parallelism to be able to run
1220 * (for updates which don't touch global state) and a global one. Then we only 1281 * different atomic commits on different CRTCs in parallel. The simplest way to
1221 * need to synchronize with the crtc work items for changed crtcs and the global 1282 * achieve this is by running them on the &system_unbound_wq work queue. Note
1222 * work item, which allows nice concurrent updates on disjoint sets of crtcs. 1283 * that drivers are not required to split up individual atomic commits and run
1284 * the parts in parallel - userspace is supposed to do that if it cares. It
1285 * might be beneficial to do that for modesets, since those necessarily must be
1286 * done as one global operation, and enabling or disabling a CRTC can take a
1287 * long time. But even that is not required.
1223 * 1288 *
1224 * 3. The software state is updated synchronously with 1289 * 3. The software state is updated synchronously with
1225 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset 1290 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
@@ -1232,7 +1297,311 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1232 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and 1297 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1233 * then cleaning up the framebuffers after the old framebuffer is no longer 1298 * then cleaning up the framebuffers after the old framebuffer is no longer
1234 * being displayed. 1299 * being displayed.
1300 *
1301 * The above scheme is implemented in the atomic helper libraries in
1302 * drm_atomic_helper_commit() using a bunch of helper functions. See
1303 * drm_atomic_helper_setup_commit() for a starting point.
1304 */
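
Hooking all of this up in a driver is then mostly vtable wiring. A minimal sketch, with the mydrv_ names hypothetical and the helpers taken from this patch ::

	static struct drm_mode_config_helper_funcs mydrv_mode_config_helpers = {
		/* Optional; drm_atomic_helper_commit_tail() is used if NULL. */
		.atomic_commit_tail = mydrv_atomic_commit_tail,
	};

	static const struct drm_mode_config_funcs mydrv_mode_config_funcs = {
		.fb_create = drm_fb_cma_create,
		.atomic_check = drm_atomic_helper_check,
		.atomic_commit = drm_atomic_helper_commit,
	};

	/* In the driver's modeset init code: */
	dev->mode_config.funcs = &mydrv_mode_config_funcs;
	dev->mode_config.helper_private = &mydrv_mode_config_helpers;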
1305
1306static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1307{
1308 struct drm_crtc_commit *commit, *stall_commit = NULL;
1309 bool completed = true;
1310 int i;
1311 long ret = 0;
1312
1313 spin_lock(&crtc->commit_lock);
1314 i = 0;
1315 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1316 if (i == 0) {
1317 completed = try_wait_for_completion(&commit->flip_done);
1318 /* Userspace is not allowed to get ahead of the previous
1319 * commit with nonblocking ones. */
1320 if (!completed && nonblock) {
1321 spin_unlock(&crtc->commit_lock);
1322 return -EBUSY;
1323 }
1324 } else if (i == 1) {
1325 stall_commit = commit;
1326 drm_crtc_commit_get(stall_commit);
1327 } else
1328 break;
1329
1330 i++;
1331 }
1332 spin_unlock(&crtc->commit_lock);
1333
1334 if (!stall_commit)
1335 return 0;
1336
1337 /* We don't want to let commits get ahead of cleanup work too much,
1338 * stalling on the 2nd previous commit means triple-buffering won't ever stall.
1339 */
1340 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
1341 10*HZ);
1342 if (ret == 0)
1343 DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
1344 crtc->base.id, crtc->name);
1345
1346 drm_crtc_commit_put(stall_commit);
1347
1348 return ret < 0 ? ret : 0;
1349}
1350
1351/**
1352 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
1353 * @state: new modeset state to be committed
1354 * @nonblock: whether nonblocking behavior is requested.
1355 *
1356 * This function prepares @state to be used by the atomic helper's support for
1357 * nonblocking commits. Drivers using the nonblocking commit infrastructure
1358 * should always call this function from their ->atomic_commit hook.
1359 *
1360 * To be able to use this support drivers need to use a few more helper
1361 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
1362 * actually committing the hardware state, and for nonblocking commits this call
1363 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
1364 * and its stall parameter, for when a driver's commit hooks look at the
1365 * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly.
1366 *
1367 * Completion of the hardware commit step must be signalled using
1368 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
1369 * to read or change any permanent software or hardware modeset state. The only
1370 * exception is state protected by other means than &drm_modeset_lock locks.
1371 * Only the free-standing @state with pointers to the old state structures can
1372 * be inspected, e.g. to clean up old buffers using
1373 * drm_atomic_helper_cleanup_planes().
1374 *
1375 * At the very end, before cleaning up @state drivers must call
1376 * drm_atomic_helper_commit_cleanup_done().
1377 *
1378 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
1379 * complete and easy-to-use default implementation of the atomic_commit() hook.
1380 *
1381 * The tracking of asynchronously executed and still pending commits is done
1382 * using the core structure &drm_crtc_commit.
1383 *
1384 * By default there's no need to clean up resources allocated by this function
1385 * explicitly: drm_atomic_state_default_clear() will take care of that
1386 * automatically.
1387 *
1388 * Returns:
1389 *
1390 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
1391 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
1392 */
1393int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
1394 bool nonblock)
1395{
1396 struct drm_crtc *crtc;
1397 struct drm_crtc_state *crtc_state;
1398 struct drm_crtc_commit *commit;
1399 int i, ret;
1400
1401 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1402 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
1403 if (!commit)
1404 return -ENOMEM;
1405
1406 init_completion(&commit->flip_done);
1407 init_completion(&commit->hw_done);
1408 init_completion(&commit->cleanup_done);
1409 INIT_LIST_HEAD(&commit->commit_entry);
1410 kref_init(&commit->ref);
1411 commit->crtc = crtc;
1412
1413 state->crtcs[i].commit = commit;
1414
1415 ret = stall_checks(crtc, nonblock);
1416 if (ret)
1417 return ret;
1418
1419 /* Drivers only send out events when either the current or the
1420 * new CRTC state is active. Complete right away if everything
1421 * stays off. */
1422 if (!crtc->state->active && !crtc_state->active) {
1423 complete_all(&commit->flip_done);
1424 continue;
1425 }
1426
1427 /* Legacy cursor updates are fully unsynced. */
1428 if (state->legacy_cursor_update) {
1429 complete_all(&commit->flip_done);
1430 continue;
1431 }
1432
1433 if (!crtc_state->event) {
1434 commit->event = kzalloc(sizeof(*commit->event),
1435 GFP_KERNEL);
1436 if (!commit->event)
1437 return -ENOMEM;
1438
1439 crtc_state->event = commit->event;
1440 }
1441
1442 crtc_state->event->base.completion = &commit->flip_done;
1443 }
1444
1445 return 0;
1446}
1447EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
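
Taken together, the contract spelled out above amounts to a commit worker of the following shape. This is only a sketch (mydrv_commit_tail is hypothetical); the helper's own commit_tail() plus drm_atomic_helper_commit_tail() implement exactly this sequence ::

	static void mydrv_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		/* Wait for outstanding rendering on the new buffers. */
		drm_atomic_helper_wait_for_fences(dev, state);

		/* Stall for preceding commits touching the same CRTCs. */
		drm_atomic_helper_wait_for_dependencies(state);

		drm_atomic_helper_commit_modeset_disables(dev, state);
		drm_atomic_helper_commit_planes(dev, state, false);
		drm_atomic_helper_commit_modeset_enables(dev, state);

		/* No permanent software or hardware modeset state may be
		 * touched past this point ... */
		drm_atomic_helper_commit_hw_done(state);

		/* ... only the old state in @state, e.g. for buffer cleanup. */
		drm_atomic_helper_wait_for_vblanks(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);

		/* Signal full completion right before freeing @state. */
		drm_atomic_helper_commit_cleanup_done(state);
		drm_atomic_state_free(state);
	}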
1448
1450static struct drm_crtc_commit *preceding_commit(struct drm_crtc *crtc)
1451{
1452 struct drm_crtc_commit *commit;
1453 int i = 0;
1454
1455 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1456 /* skip the first entry, that's the current commit */
1457 if (i == 1)
1458 return commit;
1459 i++;
1460 }
1461
1462 return NULL;
1463}
1464
1465/**
1466 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
1467 * @state: new modeset state to be committed
1468 *
1469 * This function waits for all preceding commits that touch the same CRTC as
1470 * @state to both be committed to the hardware (as signalled by
1471 * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
1472 * by calling drm_crtc_send_vblank_event() on the event member of
1473 * &drm_crtc_state).
1474 *
1475 * This is part of the atomic helper support for nonblocking commits, see
1476 * drm_atomic_helper_setup_commit() for an overview.
1477 */
1478void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
1479{
1480 struct drm_crtc *crtc;
1481 struct drm_crtc_state *crtc_state;
1482 struct drm_crtc_commit *commit;
1483 int i;
1484 long ret;
1485
1486 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1487 spin_lock(&crtc->commit_lock);
1488 commit = preceding_commit(crtc);
1489 if (commit)
1490 drm_crtc_commit_get(commit);
1491 spin_unlock(&crtc->commit_lock);
1492
1493 if (!commit)
1494 continue;
1495
1496 ret = wait_for_completion_timeout(&commit->hw_done,
1497 10*HZ);
1498 if (ret == 0)
1499 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
1500 crtc->base.id, crtc->name);
1501
1502 /* Currently no support for overwriting flips, hence
1503 * stall for the previous one to execute completely. */
1504 ret = wait_for_completion_timeout(&commit->flip_done,
1505 10*HZ);
1506 if (ret == 0)
1507 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1508 crtc->base.id, crtc->name);
1509
1510 drm_crtc_commit_put(commit);
1511 }
1512}
1513EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
1514
1515/**
1516 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
1517 * @state: new modeset state to be committed
1518 *
1519 * This function is used to signal completion of the hardware commit step. After
1520 * this step the driver is not allowed to read or change any permanent software
1521 * or hardware modeset state. The only exception is state protected by other
1522 * means than &drm_modeset_lock locks.
1523 *
1524 * Drivers should try to postpone any expensive or delayed cleanup work until
1525 * after this function is called.
1526 *
1527 * This is part of the atomic helper support for nonblocking commits, see
1528 * drm_atomic_helper_setup_commit() for an overview.
1529 */
1530void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
1531{
1532 struct drm_crtc *crtc;
1533 struct drm_crtc_state *crtc_state;
1534 struct drm_crtc_commit *commit;
1535 int i;
1536
1537 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1538 commit = state->crtcs[i].commit;
1539 if (!commit)
1540 continue;
1541
1542 /* backend must have consumed any event by now */
1543 WARN_ON(crtc->state->event);
1544 spin_lock(&crtc->commit_lock);
1545 complete_all(&commit->hw_done);
1546 spin_unlock(&crtc->commit_lock);
1547 }
1548}
1549EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
1550
1551/**
1552 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
1553 * @state: new modeset state to be committed
1554 *
1555 * This signals completion of the atomic update @state, including any cleanup
1556 * work. If used, it must be called right before calling
1557 * drm_atomic_state_free().
1558 *
1559 * This is part of the atomic helper support for nonblocking commits, see
1560 * drm_atomic_helper_setup_commit() for an overview.
1235 */ 1561 */
1562void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
1563{
1564 struct drm_crtc *crtc;
1565 struct drm_crtc_state *crtc_state;
1566 struct drm_crtc_commit *commit;
1567 int i;
1568 long ret;
1569
1570 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1571 commit = state->crtcs[i].commit;
1572 if (WARN_ON(!commit))
1573 continue;
1574
1575 spin_lock(&crtc->commit_lock);
1576 complete_all(&commit->cleanup_done);
1577 WARN_ON(!try_wait_for_completion(&commit->hw_done));
1578
1579 /* commit_list borrows our reference, need to remove before we
1580 * clean up our drm_atomic_state. But only after it actually
1581 * completed, otherwise subsequent commits won't stall properly. */
1582 if (try_wait_for_completion(&commit->flip_done)) {
1583 list_del(&commit->commit_entry);
1584 spin_unlock(&crtc->commit_lock);
1585 continue;
1586 }
1587
1588 spin_unlock(&crtc->commit_lock);
1589
1590 /* We must wait for the vblank event to signal our completion
1591 * before releasing our reference, since the vblank work does
1592 * not hold a reference of its own. */
1593 ret = wait_for_completion_timeout(&commit->flip_done,
1594 10*HZ);
1595 if (ret == 0)
1596 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1597 crtc->base.id, crtc->name);
1598
1599 spin_lock(&crtc->commit_lock);
1600 list_del(&commit->commit_entry);
1601 spin_unlock(&crtc->commit_lock);
1602 }
1603}
1604EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
1236 1605
1237/** 1606/**
1238 * drm_atomic_helper_prepare_planes - prepare plane resources before commit 1607 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
@@ -1249,16 +1618,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1249int drm_atomic_helper_prepare_planes(struct drm_device *dev, 1618int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1250 struct drm_atomic_state *state) 1619 struct drm_atomic_state *state)
1251{ 1620{
1252 int nplanes = dev->mode_config.num_total_plane; 1621 struct drm_plane *plane;
1253 int ret, i; 1622 struct drm_plane_state *plane_state;
1623 int ret, i, j;
1254 1624
1255 for (i = 0; i < nplanes; i++) { 1625 for_each_plane_in_state(state, plane, plane_state, i) {
1256 const struct drm_plane_helper_funcs *funcs; 1626 const struct drm_plane_helper_funcs *funcs;
1257 struct drm_plane *plane = state->planes[i];
1258 struct drm_plane_state *plane_state = state->plane_states[i];
1259
1260 if (!plane)
1261 continue;
1262 1627
1263 funcs = plane->helper_private; 1628 funcs = plane->helper_private;
1264 1629
@@ -1272,12 +1637,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1272 return 0; 1637 return 0;
1273 1638
1274fail: 1639fail:
1275 for (i--; i >= 0; i--) { 1640 for_each_plane_in_state(state, plane, plane_state, j) {
1276 const struct drm_plane_helper_funcs *funcs; 1641 const struct drm_plane_helper_funcs *funcs;
1277 struct drm_plane *plane = state->planes[i];
1278 struct drm_plane_state *plane_state = state->plane_states[i];
1279 1642
1280 if (!plane) 1643 if (j >= i)
1281 continue; 1644 continue;
1282 1645
1283 funcs = plane->helper_private; 1646 funcs = plane->helper_private;
@@ -1537,8 +1900,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
1537 1900
1538/** 1901/**
1539 * drm_atomic_helper_swap_state - store atomic state into current sw state 1902 * drm_atomic_helper_swap_state - store atomic state into current sw state
1540 * @dev: DRM device
1541 * @state: atomic state 1903 * @state: atomic state
1904 * @stall: stall for preceding commits
1542 * 1905 *
1543 * This function stores the atomic state into the current state pointers in all 1906 * This function stores the atomic state into the current state pointers in all
1544 * driver objects. It should be called after all failing steps have been done 1907 * driver objects. It should be called after all failing steps have been done
@@ -1559,42 +1922,70 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
1559 * 1922 *
1560 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3 1923 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
1561 * contains the old state. Also do any other cleanup required with that state. 1924 * contains the old state. Also do any other cleanup required with that state.
1925 *
1926 * @stall must be set when nonblocking commits for this driver directly access
1927 * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the
1928 * current atomic helpers this is almost always the case, since the helpers
1929 * don't pass the right state structures to the callbacks.
1562 */ 1930 */
1563void drm_atomic_helper_swap_state(struct drm_device *dev, 1931void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
1564 struct drm_atomic_state *state) 1932 bool stall)
1565{ 1933{
1566 int i; 1934 int i;
1935 long ret;
1936 struct drm_connector *connector;
1937 struct drm_connector_state *conn_state;
1938 struct drm_crtc *crtc;
1939 struct drm_crtc_state *crtc_state;
1940 struct drm_plane *plane;
1941 struct drm_plane_state *plane_state;
1942 struct drm_crtc_commit *commit;
1943
1944 if (stall) {
1945 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1946 spin_lock(&crtc->commit_lock);
1947 commit = list_first_entry_or_null(&crtc->commit_list,
1948 struct drm_crtc_commit, commit_entry);
1949 if (commit)
1950 drm_crtc_commit_get(commit);
1951 spin_unlock(&crtc->commit_lock);
1952
1953 if (!commit)
1954 continue;
1567 1955
1568 for (i = 0; i < state->num_connector; i++) { 1956 ret = wait_for_completion_timeout(&commit->hw_done,
1569 struct drm_connector *connector = state->connectors[i]; 1957 10*HZ);
1570 1958 if (ret == 0)
1571 if (!connector) 1959 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
1572 continue; 1960 crtc->base.id, crtc->name);
1961 drm_crtc_commit_put(commit);
1962 }
1963 }
1573 1964
1965 for_each_connector_in_state(state, connector, conn_state, i) {
1574 connector->state->state = state; 1966 connector->state->state = state;
1575 swap(state->connector_states[i], connector->state); 1967 swap(state->connectors[i].state, connector->state);
1576 connector->state->state = NULL; 1968 connector->state->state = NULL;
1577 } 1969 }
1578 1970
1579 for (i = 0; i < dev->mode_config.num_crtc; i++) { 1971 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1580 struct drm_crtc *crtc = state->crtcs[i];
1581
1582 if (!crtc)
1583 continue;
1584
1585 crtc->state->state = state; 1972 crtc->state->state = state;
1586 swap(state->crtc_states[i], crtc->state); 1973 swap(state->crtcs[i].state, crtc->state);
1587 crtc->state->state = NULL; 1974 crtc->state->state = NULL;
1588 }
1589 1975
1590 for (i = 0; i < dev->mode_config.num_total_plane; i++) { 1976 if (state->crtcs[i].commit) {
1591 struct drm_plane *plane = state->planes[i]; 1977 spin_lock(&crtc->commit_lock);
1978 list_add(&state->crtcs[i].commit->commit_entry,
1979 &crtc->commit_list);
1980 spin_unlock(&crtc->commit_lock);
1592 1981
1593 if (!plane) 1982 state->crtcs[i].commit->event = NULL;
1594 continue; 1983 }
1984 }
1595 1985
1986 for_each_plane_in_state(state, plane, plane_state, i) {
1596 plane->state->state = state; 1987 plane->state->state = state;
1597 swap(state->plane_states[i], plane->state); 1988 swap(state->planes[i].state, plane->state);
1598 plane->state->state = NULL; 1989 plane->state->state = NULL;
1599 } 1990 }
1600} 1991}
@@ -2409,7 +2800,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
2409 * This is the main helper function provided by the atomic helper framework for 2800 * This is the main helper function provided by the atomic helper framework for
2410 * implementing the legacy DPMS connector interface. It computes the new desired 2801 * implementing the legacy DPMS connector interface. It computes the new desired
2411 * ->active state for the corresponding CRTC (if the connector is enabled) and 2802 * ->active state for the corresponding CRTC (if the connector is enabled) and
2412 * updates it. 2803 * updates it.
2413 * 2804 *
2414 * Returns: 2805 * Returns:
2415 * Returns 0 on success, negative errno numbers on failure. 2806 * Returns 0 on success, negative errno numbers on failure.
@@ -2930,16 +3321,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
2930 * @red: red correction table 3321 * @red: red correction table
2931 * @green: green correction table 3322 * @green: green correction table
2932 * @blue: blue correction table 3323 * @blue: blue correction table
2933 * @start:
2934 * @size: size of the tables 3324 * @size: size of the tables
2935 * 3325 *
2936 * Implements support for legacy gamma correction table for drivers 3326 * Implements support for legacy gamma correction table for drivers
2937 * that support color management through the DEGAMMA_LUT/GAMMA_LUT 3327 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
2938 * properties. 3328 * properties.
2939 */ 3329 */
2940void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, 3330int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
2941 u16 *red, u16 *green, u16 *blue, 3331 u16 *red, u16 *green, u16 *blue,
2942 uint32_t start, uint32_t size) 3332 uint32_t size)
2943{ 3333{
2944 struct drm_device *dev = crtc->dev; 3334 struct drm_device *dev = crtc->dev;
2945 struct drm_mode_config *config = &dev->mode_config; 3335 struct drm_mode_config *config = &dev->mode_config;
@@ -2951,7 +3341,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
2951 3341
2952 state = drm_atomic_state_alloc(crtc->dev); 3342 state = drm_atomic_state_alloc(crtc->dev);
2953 if (!state) 3343 if (!state)
2954 return; 3344 return -ENOMEM;
2955 3345
2956 blob = drm_property_create_blob(dev, 3346 blob = drm_property_create_blob(dev,
2957 sizeof(struct drm_color_lut) * size, 3347 sizeof(struct drm_color_lut) * size,
@@ -3002,7 +3392,7 @@ retry:
3002 3392
3003 drm_property_unreference_blob(blob); 3393 drm_property_unreference_blob(blob);
3004 3394
3005 return; 3395 return 0;
3006fail: 3396fail:
3007 if (ret == -EDEADLK) 3397 if (ret == -EDEADLK)
3008 goto backoff; 3398 goto backoff;
@@ -3010,7 +3400,7 @@ fail:
3010 drm_atomic_state_free(state); 3400 drm_atomic_state_free(state);
3011 drm_property_unreference_blob(blob); 3401 drm_property_unreference_blob(blob);
3012 3402
3013 return; 3403 return ret;
3014backoff: 3404backoff:
3015 drm_atomic_state_clear(state); 3405 drm_atomic_state_clear(state);
3016 drm_atomic_legacy_backoff(state); 3406 drm_atomic_legacy_backoff(state);
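
With the int return and the dropped start parameter, the helper now lines up with the reworked &drm_crtc_funcs ->gamma_set hook, so atomic drivers can plug it straight into their CRTC vtable. A minimal sketch (mydrv_crtc_funcs is hypothetical; all helpers referenced exist in the atomic helper library) ::

	static const struct drm_crtc_funcs mydrv_crtc_funcs = {
		/* ->gamma_set now returns int and takes no start offset. */
		.gamma_set = drm_atomic_helper_legacy_gamma_set,
		.set_config = drm_atomic_helper_set_config,
		.page_flip = drm_atomic_helper_page_flip,
		.destroy = drm_crtc_cleanup,
		.reset = drm_atomic_helper_crtc_reset,
		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	};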
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index b3654404abd0..255543086590 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -36,7 +36,7 @@
36 * encoder chain. 36 * encoder chain.
37 * 37 *
38 * A bridge is always attached to a single &drm_encoder at a time, but can be 38 * A bridge is always attached to a single &drm_encoder at a time, but can be
39 * either connected to it directly, or through an intermediate bridge: 39 * either connected to it directly, or through an intermediate bridge::
40 * 40 *
41 * encoder ---> bridge B ---> bridge A 41 * encoder ---> bridge B ---> bridge A
42 * 42 *
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d2a6d958ca76..4ec35f9e6de5 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -239,37 +239,6 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order)
239} 239}
240EXPORT_SYMBOL(drm_get_subpixel_order_name); 240EXPORT_SYMBOL(drm_get_subpixel_order_name);
241 241
242static char printable_char(int c)
243{
244 return isascii(c) && isprint(c) ? c : '?';
245}
246
247/**
248 * drm_get_format_name - return a string for drm fourcc format
249 * @format: format to compute name of
250 *
251 * Note that the buffer used by this function is globally shared and owned by
252 * the function itself.
253 *
254 * FIXME: This isn't really multithreading safe.
255 */
256const char *drm_get_format_name(uint32_t format)
257{
258 static char buf[32];
259
260 snprintf(buf, sizeof(buf),
261 "%c%c%c%c %s-endian (0x%08x)",
262 printable_char(format & 0xff),
263 printable_char((format >> 8) & 0xff),
264 printable_char((format >> 16) & 0xff),
265 printable_char((format >> 24) & 0x7f),
266 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
267 format);
268
269 return buf;
270}
271EXPORT_SYMBOL(drm_get_format_name);
272
273/* 242/*
274 * Internal function to assign a slot in the object idr and optionally 243 * Internal function to assign a slot in the object idr and optionally
275 * register the object into the idr. 244 * register the object into the idr.
@@ -535,7 +504,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
535 * 504 *
536 * Cleanup framebuffer. This function is intended to be used from the drivers 505 * Cleanup framebuffer. This function is intended to be used from the drivers
537 * ->destroy callback. It can also be used to clean up driver private 506 * ->destroy callback. It can also be used to clean up driver private
538 * framebuffers embedded into a larger structure. 507 * framebuffers embedded into a larger structure.
539 * 508 *
540 * Note that this function does not remove the fb from active usage - if it is 509 * still used anywhere, hilarity can ensue since userspace could call getfb on
541 * still used anywhere, hilarity can ensue since userspace could call getfb on 510 * still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -669,6 +638,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
669 crtc->dev = dev; 638 crtc->dev = dev;
670 crtc->funcs = funcs; 639 crtc->funcs = funcs;
671 640
641 INIT_LIST_HEAD(&crtc->commit_list);
642 spin_lock_init(&crtc->commit_lock);
643
672 drm_modeset_lock_init(&crtc->mutex); 644 drm_modeset_lock_init(&crtc->mutex);
673 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); 645 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
674 if (ret) 646 if (ret)
@@ -692,7 +664,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
692 crtc->base.properties = &crtc->properties; 664 crtc->base.properties = &crtc->properties;
693 665
694 list_add_tail(&crtc->head, &config->crtc_list); 666 list_add_tail(&crtc->head, &config->crtc_list);
695 config->num_crtc++; 667 crtc->index = config->num_crtc++;
696 668
697 crtc->primary = primary; 669 crtc->primary = primary;
698 crtc->cursor = cursor; 670 crtc->cursor = cursor;
@@ -722,6 +694,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
722{ 694{
723 struct drm_device *dev = crtc->dev; 695 struct drm_device *dev = crtc->dev;
724 696
697 /* Note that the crtc_list is considered to be static; should we
698 * remove the drm_crtc at runtime we would have to decrement all
699 * the indices on the drm_crtc after us in the crtc_list.
700 */
701
725 kfree(crtc->gamma_store); 702 kfree(crtc->gamma_store);
726 crtc->gamma_store = NULL; 703 crtc->gamma_store = NULL;
727 704
@@ -741,29 +718,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
741} 718}
742EXPORT_SYMBOL(drm_crtc_cleanup); 719EXPORT_SYMBOL(drm_crtc_cleanup);
743 720
744/**
745 * drm_crtc_index - find the index of a registered CRTC
746 * @crtc: CRTC to find index for
747 *
748 * Given a registered CRTC, return the index of that CRTC within a DRM
749 * device's list of CRTCs.
750 */
751unsigned int drm_crtc_index(struct drm_crtc *crtc)
752{
753 unsigned int index = 0;
754 struct drm_crtc *tmp;
755
756 drm_for_each_crtc(tmp, crtc->dev) {
757 if (tmp == crtc)
758 return index;
759
760 index++;
761 }
762
763 BUG();
764}
765EXPORT_SYMBOL(drm_crtc_index);
766
767/* 721/*
768 * drm_mode_remove - remove and free a mode 722 * drm_mode_remove - remove and free a mode
769 * @connector: connector list to modify 723 * @connector: connector list to modify
@@ -1166,7 +1120,7 @@ int drm_encoder_init(struct drm_device *dev,
1166 } 1120 }
1167 1121
1168 list_add_tail(&encoder->head, &dev->mode_config.encoder_list); 1122 list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
1169 dev->mode_config.num_encoder++; 1123 encoder->index = dev->mode_config.num_encoder++;
1170 1124
1171out_put: 1125out_put:
1172 if (ret) 1126 if (ret)
@@ -1180,29 +1134,6 @@ out_unlock:
1180EXPORT_SYMBOL(drm_encoder_init); 1134EXPORT_SYMBOL(drm_encoder_init);
1181 1135
1182/** 1136/**
1183 * drm_encoder_index - find the index of a registered encoder
1184 * @encoder: encoder to find index for
1185 *
1186 * Given a registered encoder, return the index of that encoder within a DRM
1187 * device's list of encoders.
1188 */
1189unsigned int drm_encoder_index(struct drm_encoder *encoder)
1190{
1191 unsigned int index = 0;
1192 struct drm_encoder *tmp;
1193
1194 drm_for_each_encoder(tmp, encoder->dev) {
1195 if (tmp == encoder)
1196 return index;
1197
1198 index++;
1199 }
1200
1201 BUG();
1202}
1203EXPORT_SYMBOL(drm_encoder_index);
1204
1205/**
1206 * drm_encoder_cleanup - cleans up an initialised encoder 1137 * drm_encoder_cleanup - cleans up an initialised encoder
1207 * @encoder: encoder to cleanup 1138 * @encoder: encoder to cleanup
1208 * 1139 *
@@ -1212,6 +1143,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
1212{ 1143{
1213 struct drm_device *dev = encoder->dev; 1144 struct drm_device *dev = encoder->dev;
1214 1145
1146 /* Note that the encoder_list is considered to be static; should we
1147 * remove the drm_encoder at runtime we would have to decrement all
1148 * the indices on the drm_encoder after us in the encoder_list.
1149 */
1150
1215 drm_modeset_lock_all(dev); 1151 drm_modeset_lock_all(dev);
1216 drm_mode_object_unregister(dev, &encoder->base); 1152 drm_mode_object_unregister(dev, &encoder->base);
1217 kfree(encoder->name); 1153 kfree(encoder->name);
@@ -1300,7 +1236,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1300 plane->type = type; 1236 plane->type = type;
1301 1237
1302 list_add_tail(&plane->head, &config->plane_list); 1238 list_add_tail(&plane->head, &config->plane_list);
1303 config->num_total_plane++; 1239 plane->index = config->num_total_plane++;
1304 if (plane->type == DRM_PLANE_TYPE_OVERLAY) 1240 if (plane->type == DRM_PLANE_TYPE_OVERLAY)
1305 config->num_overlay_plane++; 1241 config->num_overlay_plane++;
1306 1242
@@ -1374,6 +1310,11 @@ void drm_plane_cleanup(struct drm_plane *plane)
1374 1310
1375 BUG_ON(list_empty(&plane->head)); 1311 BUG_ON(list_empty(&plane->head));
1376 1312
1313 /* Note that the plane_list is considered to be static; should we
1314 * remove the drm_plane at runtime we would have to decrement all
1315 * the indices on the drm_plane after us in the plane_list.
1316 */
1317
1377 list_del(&plane->head); 1318 list_del(&plane->head);
1378 dev->mode_config.num_total_plane--; 1319 dev->mode_config.num_total_plane--;
1379 if (plane->type == DRM_PLANE_TYPE_OVERLAY) 1320 if (plane->type == DRM_PLANE_TYPE_OVERLAY)
@@ -1391,29 +1332,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
1391EXPORT_SYMBOL(drm_plane_cleanup); 1332EXPORT_SYMBOL(drm_plane_cleanup);
1392 1333
1393/** 1334/**
1394 * drm_plane_index - find the index of a registered plane
1395 * @plane: plane to find index for
1396 *
1397 * Given a registered plane, return the index of that CRTC within a DRM
1398 * device's list of planes.
1399 */
1400unsigned int drm_plane_index(struct drm_plane *plane)
1401{
1402 unsigned int index = 0;
1403 struct drm_plane *tmp;
1404
1405 drm_for_each_plane(tmp, plane->dev) {
1406 if (tmp == plane)
1407 return index;
1408
1409 index++;
1410 }
1411
1412 BUG();
1413}
1414EXPORT_SYMBOL(drm_plane_index);
1415
1416/**
1417 * drm_plane_from_index - find the registered plane at an index 1335 * drm_plane_from_index - find the registered plane at an index
1418 * @dev: DRM device 1336 * @dev: DRM device
1419 * @idx: index of registered plane to find for 1337 * @idx: index of registered plane to find for
@@ -1425,13 +1343,11 @@ struct drm_plane *
1425drm_plane_from_index(struct drm_device *dev, int idx) 1343drm_plane_from_index(struct drm_device *dev, int idx)
1426{ 1344{
1427 struct drm_plane *plane; 1345 struct drm_plane *plane;
1428 unsigned int i = 0;
1429 1346
1430 drm_for_each_plane(plane, dev) { 1347 drm_for_each_plane(plane, dev)
1431 if (i == idx) 1348 if (idx == plane->index)
1432 return plane; 1349 return plane;
1433 i++; 1350
1434 }
1435 return NULL; 1351 return NULL;
1436} 1352}
1437EXPORT_SYMBOL(drm_plane_from_index); 1353EXPORT_SYMBOL(drm_plane_from_index);
@@ -2821,8 +2737,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2821 goto out; 2737 goto out;
2822 } 2738 }
2823 2739
2824 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
2825
2826 /* 2740 /*
2827 * Check whether the primary plane supports the fb pixel format. 2741 * Check whether the primary plane supports the fb pixel format.
2828 * Drivers not implementing the universal planes API use a 2742 * Drivers not implementing the universal planes API use a
@@ -2977,6 +2891,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2977 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); 2891 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
2978 return PTR_ERR(fb); 2892 return PTR_ERR(fb);
2979 } 2893 }
2894 fb->hot_x = req->hot_x;
2895 fb->hot_y = req->hot_y;
2980 } else { 2896 } else {
2981 fb = NULL; 2897 fb = NULL;
2982 } 2898 }
@@ -4841,7 +4757,8 @@ bool drm_property_change_valid_get(struct drm_property *property,
4841 if (value == 0) 4757 if (value == 0)
4842 return true; 4758 return true;
4843 4759
4844 return _object_find(property->dev, value, property->values[0]) != NULL; 4760 *ref = _object_find(property->dev, value, property->values[0]);
4761 return *ref != NULL;
4845 } 4762 }
4846 4763
4847 for (i = 0; i < property->num_values; i++) 4764 for (i = 0; i < property->num_values; i++)
@@ -5139,6 +5056,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
5139int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, 5056int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
5140 int gamma_size) 5057 int gamma_size)
5141{ 5058{
5059 uint16_t *r_base, *g_base, *b_base;
5060 int i;
5061
5142 crtc->gamma_size = gamma_size; 5062 crtc->gamma_size = gamma_size;
5143 5063
5144 crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3, 5064 crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
@@ -5148,6 +5068,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
5148 return -ENOMEM; 5068 return -ENOMEM;
5149 } 5069 }
5150 5070
5071 r_base = crtc->gamma_store;
5072 g_base = r_base + gamma_size;
5073 b_base = g_base + gamma_size;
5074 for (i = 0; i < gamma_size; i++) {
5075 r_base[i] = i << 8;
5076 g_base[i] = i << 8;
5077 b_base[i] = i << 8;
5078 }
5079
5151 return 0; 5081 return 0;
5152} 5082}
5153EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); 5083EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
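
Drivers call this once at CRTC init time; with the change above the legacy gamma table then starts out as an identity mapping instead of all-black. A sketch (256 is the usual legacy LUT size, the crtc pointer is hypothetical) ::

	ret = drm_mode_crtc_set_gamma_size(crtc, 256);
	if (ret)
		return ret;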
@@ -5215,7 +5145,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
5215 goto out; 5145 goto out;
5216 } 5146 }
5217 5147
5218 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); 5148 ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
5219 5149
5220out: 5150out:
5221 drm_modeset_unlock_all(dev); 5151 drm_modeset_unlock_all(dev);
@@ -5545,264 +5475,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
5545} 5475}
5546 5476
5547/** 5477/**
5548 * drm_fb_get_bpp_depth - get the bpp/depth values for format
5549 * @format: pixel format (DRM_FORMAT_*)
5550 * @depth: storage for the depth value
5551 * @bpp: storage for the bpp value
5552 *
5553 * This only supports RGB formats here for compat with code that doesn't use
5554 * pixel formats directly yet.
5555 */
5556void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
5557 int *bpp)
5558{
5559 switch (format) {
5560 case DRM_FORMAT_C8:
5561 case DRM_FORMAT_RGB332:
5562 case DRM_FORMAT_BGR233:
5563 *depth = 8;
5564 *bpp = 8;
5565 break;
5566 case DRM_FORMAT_XRGB1555:
5567 case DRM_FORMAT_XBGR1555:
5568 case DRM_FORMAT_RGBX5551:
5569 case DRM_FORMAT_BGRX5551:
5570 case DRM_FORMAT_ARGB1555:
5571 case DRM_FORMAT_ABGR1555:
5572 case DRM_FORMAT_RGBA5551:
5573 case DRM_FORMAT_BGRA5551:
5574 *depth = 15;
5575 *bpp = 16;
5576 break;
5577 case DRM_FORMAT_RGB565:
5578 case DRM_FORMAT_BGR565:
5579 *depth = 16;
5580 *bpp = 16;
5581 break;
5582 case DRM_FORMAT_RGB888:
5583 case DRM_FORMAT_BGR888:
5584 *depth = 24;
5585 *bpp = 24;
5586 break;
5587 case DRM_FORMAT_XRGB8888:
5588 case DRM_FORMAT_XBGR8888:
5589 case DRM_FORMAT_RGBX8888:
5590 case DRM_FORMAT_BGRX8888:
5591 *depth = 24;
5592 *bpp = 32;
5593 break;
5594 case DRM_FORMAT_XRGB2101010:
5595 case DRM_FORMAT_XBGR2101010:
5596 case DRM_FORMAT_RGBX1010102:
5597 case DRM_FORMAT_BGRX1010102:
5598 case DRM_FORMAT_ARGB2101010:
5599 case DRM_FORMAT_ABGR2101010:
5600 case DRM_FORMAT_RGBA1010102:
5601 case DRM_FORMAT_BGRA1010102:
5602 *depth = 30;
5603 *bpp = 32;
5604 break;
5605 case DRM_FORMAT_ARGB8888:
5606 case DRM_FORMAT_ABGR8888:
5607 case DRM_FORMAT_RGBA8888:
5608 case DRM_FORMAT_BGRA8888:
5609 *depth = 32;
5610 *bpp = 32;
5611 break;
5612 default:
5613 DRM_DEBUG_KMS("unsupported pixel format %s\n",
5614 drm_get_format_name(format));
5615 *depth = 0;
5616 *bpp = 0;
5617 break;
5618 }
5619}
5620EXPORT_SYMBOL(drm_fb_get_bpp_depth);
5621
5622/**
5623 * drm_format_num_planes - get the number of planes for format
5624 * @format: pixel format (DRM_FORMAT_*)
5625 *
5626 * Returns:
5627 * The number of planes used by the specified pixel format.
5628 */
5629int drm_format_num_planes(uint32_t format)
5630{
5631 switch (format) {
5632 case DRM_FORMAT_YUV410:
5633 case DRM_FORMAT_YVU410:
5634 case DRM_FORMAT_YUV411:
5635 case DRM_FORMAT_YVU411:
5636 case DRM_FORMAT_YUV420:
5637 case DRM_FORMAT_YVU420:
5638 case DRM_FORMAT_YUV422:
5639 case DRM_FORMAT_YVU422:
5640 case DRM_FORMAT_YUV444:
5641 case DRM_FORMAT_YVU444:
5642 return 3;
5643 case DRM_FORMAT_NV12:
5644 case DRM_FORMAT_NV21:
5645 case DRM_FORMAT_NV16:
5646 case DRM_FORMAT_NV61:
5647 case DRM_FORMAT_NV24:
5648 case DRM_FORMAT_NV42:
5649 return 2;
5650 default:
5651 return 1;
5652 }
5653}
5654EXPORT_SYMBOL(drm_format_num_planes);
5655
5656/**
5657 * drm_format_plane_cpp - determine the bytes per pixel value
5658 * @format: pixel format (DRM_FORMAT_*)
5659 * @plane: plane index
5660 *
5661 * Returns:
5662 * The bytes per pixel value for the specified plane.
5663 */
5664int drm_format_plane_cpp(uint32_t format, int plane)
5665{
5666 unsigned int depth;
5667 int bpp;
5668
5669 if (plane >= drm_format_num_planes(format))
5670 return 0;
5671
5672 switch (format) {
5673 case DRM_FORMAT_YUYV:
5674 case DRM_FORMAT_YVYU:
5675 case DRM_FORMAT_UYVY:
5676 case DRM_FORMAT_VYUY:
5677 return 2;
5678 case DRM_FORMAT_NV12:
5679 case DRM_FORMAT_NV21:
5680 case DRM_FORMAT_NV16:
5681 case DRM_FORMAT_NV61:
5682 case DRM_FORMAT_NV24:
5683 case DRM_FORMAT_NV42:
5684 return plane ? 2 : 1;
5685 case DRM_FORMAT_YUV410:
5686 case DRM_FORMAT_YVU410:
5687 case DRM_FORMAT_YUV411:
5688 case DRM_FORMAT_YVU411:
5689 case DRM_FORMAT_YUV420:
5690 case DRM_FORMAT_YVU420:
5691 case DRM_FORMAT_YUV422:
5692 case DRM_FORMAT_YVU422:
5693 case DRM_FORMAT_YUV444:
5694 case DRM_FORMAT_YVU444:
5695 return 1;
5696 default:
5697 drm_fb_get_bpp_depth(format, &depth, &bpp);
5698 return bpp >> 3;
5699 }
5700}
5701EXPORT_SYMBOL(drm_format_plane_cpp);
5702
5703/**
5704 * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
5705 * @format: pixel format (DRM_FORMAT_*)
5706 *
5707 * Returns:
5708 * The horizontal chroma subsampling factor for the
5709 * specified pixel format.
5710 */
5711int drm_format_horz_chroma_subsampling(uint32_t format)
5712{
5713 switch (format) {
5714 case DRM_FORMAT_YUV411:
5715 case DRM_FORMAT_YVU411:
5716 case DRM_FORMAT_YUV410:
5717 case DRM_FORMAT_YVU410:
5718 return 4;
5719 case DRM_FORMAT_YUYV:
5720 case DRM_FORMAT_YVYU:
5721 case DRM_FORMAT_UYVY:
5722 case DRM_FORMAT_VYUY:
5723 case DRM_FORMAT_NV12:
5724 case DRM_FORMAT_NV21:
5725 case DRM_FORMAT_NV16:
5726 case DRM_FORMAT_NV61:
5727 case DRM_FORMAT_YUV422:
5728 case DRM_FORMAT_YVU422:
5729 case DRM_FORMAT_YUV420:
5730 case DRM_FORMAT_YVU420:
5731 return 2;
5732 default:
5733 return 1;
5734 }
5735}
5736EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
5737
5738/**
5739 * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
5740 * @format: pixel format (DRM_FORMAT_*)
5741 *
5742 * Returns:
5743 * The vertical chroma subsampling factor for the
5744 * specified pixel format.
5745 */
5746int drm_format_vert_chroma_subsampling(uint32_t format)
5747{
5748 switch (format) {
5749 case DRM_FORMAT_YUV410:
5750 case DRM_FORMAT_YVU410:
5751 return 4;
5752 case DRM_FORMAT_YUV420:
5753 case DRM_FORMAT_YVU420:
5754 case DRM_FORMAT_NV12:
5755 case DRM_FORMAT_NV21:
5756 return 2;
5757 default:
5758 return 1;
5759 }
5760}
5761EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
5762
5763/**
5764 * drm_format_plane_width - width of the plane given the first plane
5765 * @width: width of the first plane
5766 * @format: pixel format
5767 * @plane: plane index
5768 *
5769 * Returns:
5770 * The width of @plane, given that the width of the first plane is @width.
5771 */
5772int drm_format_plane_width(int width, uint32_t format, int plane)
5773{
5774 if (plane >= drm_format_num_planes(format))
5775 return 0;
5776
5777 if (plane == 0)
5778 return width;
5779
5780 return width / drm_format_horz_chroma_subsampling(format);
5781}
5782EXPORT_SYMBOL(drm_format_plane_width);
5783
5784/**
5785 * drm_format_plane_height - height of the plane given the first plane
5786 * @height: height of the first plane
5787 * @format: pixel format
5788 * @plane: plane index
5789 *
5790 * Returns:
5791 * The height of @plane, given that the height of the first plane is @height.
5792 */
5793int drm_format_plane_height(int height, uint32_t format, int plane)
5794{
5795 if (plane >= drm_format_num_planes(format))
5796 return 0;
5797
5798 if (plane == 0)
5799 return height;
5800
5801 return height / drm_format_vert_chroma_subsampling(format);
5802}
5803EXPORT_SYMBOL(drm_format_plane_height);
5804
5805/**
5806 * drm_rotation_simplify() - Try to simplify the rotation 5478 * drm_rotation_simplify() - Try to simplify the rotation
5807 * @rotation: Rotation to be simplified 5479 * @rotation: Rotation to be simplified
5808 * @supported_rotations: Supported rotations 5480 * @supported_rotations: Supported rotations
@@ -6065,3 +5737,48 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
6065 return tg; 5737 return tg;
6066} 5738}
6067EXPORT_SYMBOL(drm_mode_create_tile_group); 5739EXPORT_SYMBOL(drm_mode_create_tile_group);
5740
5741/**
5742 * drm_crtc_enable_color_mgmt - enable color management properties
5743 * @crtc: DRM CRTC
5744 * @degamma_lut_size: the size of the degamma lut (before CSC)
5745 * @has_ctm: whether to attach ctm_property for CSC matrix
5746 * @gamma_lut_size: the size of the gamma lut (after CSC)
5747 *
5748 * This function lets the driver enable the color correction
5749 * properties on a CRTC. This includes 3 degamma, csc and gamma
5750 * properties that userspace can set and 2 size properties to inform
5751 * the userspace of the lut sizes. Each of the properties are
5752 * optional. The gamma and degamma properties are only attached if
5753 * their size is not 0 and ctm_property is only attached if has_ctm is
5754 * true.
5755 */
5756void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
5757 uint degamma_lut_size,
5758 bool has_ctm,
5759 uint gamma_lut_size)
5760{
5761 struct drm_device *dev = crtc->dev;
5762 struct drm_mode_config *config = &dev->mode_config;
5763
5764 if (degamma_lut_size) {
5765 drm_object_attach_property(&crtc->base,
5766 config->degamma_lut_property, 0);
5767 drm_object_attach_property(&crtc->base,
5768 config->degamma_lut_size_property,
5769 degamma_lut_size);
5770 }
5771
5772 if (has_ctm)
5773 drm_object_attach_property(&crtc->base,
5774 config->ctm_property, 0);
5775
5776 if (gamma_lut_size) {
5777 drm_object_attach_property(&crtc->base,
5778 config->gamma_lut_property, 0);
5779 drm_object_attach_property(&crtc->base,
5780 config->gamma_lut_size_property,
5781 gamma_lut_size);
5782 }
5783}
5784EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
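
A driver would then advertise its color management capabilities from its CRTC init code; a sketch with hypothetical, hardware-specific LUT sizes ::

	/* Sizes are hardware-specific; 0 or false skips a property. */
	drm_crtc_enable_color_mgmt(crtc, 256, true, 1024);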
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a6e42433ef0e..bf10d7046aa6 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -1121,36 +1121,3 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
1121 return drm_plane_helper_commit(plane, plane_state, old_fb); 1121 return drm_plane_helper_commit(plane, plane_state, old_fb);
1122} 1122}
1123EXPORT_SYMBOL(drm_helper_crtc_mode_set_base); 1123EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
1124
1125/**
1126 * drm_helper_crtc_enable_color_mgmt - enable color management properties
1127 * @crtc: DRM CRTC
1128 * @degamma_lut_size: the size of the degamma lut (before CSC)
1129 * @gamma_lut_size: the size of the gamma lut (after CSC)
1130 *
1131 * This function lets the driver enable the color correction properties on a
1132 * CRTC. This includes 3 degamma, csc and gamma properties that userspace can
1133 * set and 2 size properties to inform the userspace of the lut sizes.
1134 */
1135void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
1136 int degamma_lut_size,
1137 int gamma_lut_size)
1138{
1139 struct drm_device *dev = crtc->dev;
1140 struct drm_mode_config *config = &dev->mode_config;
1141
1142 drm_object_attach_property(&crtc->base,
1143 config->degamma_lut_property, 0);
1144 drm_object_attach_property(&crtc->base,
1145 config->ctm_property, 0);
1146 drm_object_attach_property(&crtc->base,
1147 config->gamma_lut_property, 0);
1148
1149 drm_object_attach_property(&crtc->base,
1150 config->degamma_lut_size_property,
1151 degamma_lut_size);
1152 drm_object_attach_property(&crtc->base,
1153 config->gamma_lut_size_property,
1154 gamma_lut_size);
1155}
1156EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index bff89226a344..8b2582aeaab6 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -605,8 +605,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
605 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); 605 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
606 if (ret) 606 if (ret)
607 goto err_minors; 607 goto err_minors;
608
609 WARN_ON(driver->suspend || driver->resume);
610 } 608 }
611 609
612 if (drm_core_check_feature(dev, DRIVER_RENDER)) { 610 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9a401aed98e0..622f788bff46 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
271 * by commas, search through the list looking for one that 271 * by commas, search through the list looking for one that
272 * matches the connector. 272 * matches the connector.
273 * 273 *
274 * If there's one or more that don't't specify a connector, keep 274 * If there's one or more that doesn't specify a connector, keep
275 * the last one found one as a fallback. 275 * the last one found one as a fallback.
276 */ 276 */
277 fwstr = kstrdup(edid_firmware, GFP_KERNEL); 277 fwstr = kstrdup(edid_firmware, GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 172cafe11c71..c0b0c718994a 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -23,6 +23,7 @@
23#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_gem_cma_helper.h> 24#include <drm/drm_gem_cma_helper.h>
25#include <drm/drm_fb_cma_helper.h> 25#include <drm/drm_fb_cma_helper.h>
26#include <linux/dma-mapping.h>
26#include <linux/module.h> 27#include <linux/module.h>
27 28
28#define DEFAULT_FBDEFIO_DELAY_MS 50 29#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -52,7 +53,7 @@ struct drm_fbdev_cma {
52 * will be set up automatically. dirty() is called by 53 * will be set up automatically. dirty() is called by
53 * drm_fb_helper_deferred_io() in process context (struct delayed_work). 54 * drm_fb_helper_deferred_io() in process context (struct delayed_work).
54 * 55 *
55 * Example fbdev deferred io code: 56 * Example fbdev deferred io code::
56 * 57 *
57 * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb, 58 * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
58 * struct drm_file *file_priv, 59 * struct drm_file *file_priv,
@@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
162 * drm_fb_cma_create_with_funcs() - helper function for the 163 * drm_fb_cma_create_with_funcs() - helper function for the
163 * &drm_mode_config_funcs ->fb_create 164 * &drm_mode_config_funcs ->fb_create
164 * callback function 165 * callback function
166 * @dev: DRM device
167 * @file_priv: drm file for the ioctl call
168 * @mode_cmd: metadata from the userspace fb creation request
169 * @funcs: vtable to be used for the new framebuffer object
165 * 170 *
166 * This can be used to set &drm_framebuffer_funcs for drivers that need the 171 * This can be used to set &drm_framebuffer_funcs for drivers that need the
167 * dirty() callback. Use drm_fb_cma_create() if you don't need to change 172 * dirty() callback. Use drm_fb_cma_create() if you don't need to change
@@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
223 228
224/** 229/**
225 * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function 230 * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
231 * @dev: DRM device
232 * @file_priv: drm file for the ioctl call
233 * @mode_cmd: metadata from the userspace fb creation request
226 * 234 *
227 * If your hardware has special alignment or pitch requirements these should be 235 * If your hardware has special alignment or pitch requirements these should be
228 * checked before calling this function. Use drm_fb_cma_create_with_funcs() if 236 * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
@@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
246 * This function will usually be called from the CRTC callback functions. 254 * This function will usually be called from the CRTC callback functions.
247 */ 255 */
248struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, 256struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
249 unsigned int plane) 257 unsigned int plane)
250{ 258{
251 struct drm_fb_cma *fb_cma = to_fb_cma(fb); 259 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
252 260
@@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
258EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); 266EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
259 267
260#ifdef CONFIG_DEBUG_FS 268#ifdef CONFIG_DEBUG_FS
261/*
262 * drm_fb_cma_describe() - Helper to dump information about a single
263 * CMA framebuffer object
264 */
265static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) 269static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
266{ 270{
267 struct drm_fb_cma *fb_cma = to_fb_cma(fb); 271 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
279 283
280/** 284/**
281 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects 285 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
282 * in debugfs. 286 * in debugfs.
287 * @m: output file
288 * @arg: private data for the callback
283 */ 289 */
284int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) 290int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
285{ 291{
@@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
297EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show); 303EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
298#endif 304#endif
299 305
306static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
307{
308 return dma_mmap_writecombine(info->device, vma, info->screen_base,
309 info->fix.smem_start, info->fix.smem_len);
310}
311
300static struct fb_ops drm_fbdev_cma_ops = { 312static struct fb_ops drm_fbdev_cma_ops = {
301 .owner = THIS_MODULE, 313 .owner = THIS_MODULE,
302 .fb_fillrect = drm_fb_helper_sys_fillrect, 314 .fb_fillrect = drm_fb_helper_sys_fillrect,
@@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = {
307 .fb_blank = drm_fb_helper_blank, 319 .fb_blank = drm_fb_helper_blank,
308 .fb_pan_display = drm_fb_helper_pan_display, 320 .fb_pan_display = drm_fb_helper_pan_display,
309 .fb_setcmap = drm_fb_helper_setcmap, 321 .fb_setcmap = drm_fb_helper_setcmap,
322 .fb_mmap = drm_fb_cma_mmap,
310}; 323};
311 324
312static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info, 325static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
@@ -333,6 +346,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
333 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); 346 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
334 if (!fbdefio || !fbops) { 347 if (!fbdefio || !fbops) {
335 kfree(fbdefio); 348 kfree(fbdefio);
349 kfree(fbops);
336 return -ENOMEM; 350 return -ENOMEM;
337 } 351 }
338 352
@@ -445,7 +459,7 @@ err_cma_destroy:
445err_fb_info_destroy: 459err_fb_info_destroy:
446 drm_fb_helper_release_fbi(helper); 460 drm_fb_helper_release_fbi(helper);
447err_gem_free_object: 461err_gem_free_object:
448 dev->driver->gem_free_object(&obj->base); 462 drm_gem_object_unreference_unlocked(&obj->base);
449 return ret; 463 return ret;
450} 464}
451EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); 465EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7c2eb75db60f..0bac5246e5a7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
227 g_base = r_base + crtc->gamma_size; 227 g_base = r_base + crtc->gamma_size;
228 b_base = g_base + crtc->gamma_size; 228 b_base = g_base + crtc->gamma_size;
229 229
230 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); 230 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
231} 231}
232 232
233/** 233/**
@@ -385,7 +385,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
385 385
386 drm_warn_on_modeset_not_all_locked(dev); 386 drm_warn_on_modeset_not_all_locked(dev);
387 387
388 if (fb_helper->atomic) 388 if (dev->mode_config.funcs->atomic_commit)
389 return restore_fbdev_mode_atomic(fb_helper); 389 return restore_fbdev_mode_atomic(fb_helper);
390 390
391 drm_for_each_plane(plane, dev) { 391 drm_for_each_plane(plane, dev) {
@@ -716,8 +716,6 @@ int drm_fb_helper_init(struct drm_device *dev,
716 i++; 716 i++;
717 } 717 }
718 718
719 fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
720
721 return 0; 719 return 0;
722out_free: 720out_free:
723 drm_fb_helper_crtc_free(fb_helper); 721 drm_fb_helper_crtc_free(fb_helper);
@@ -1042,7 +1040,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
1042{ 1040{
1043 struct drm_fb_helper *fb_helper = info->par; 1041 struct drm_fb_helper *fb_helper = info->par;
1044 struct drm_framebuffer *fb = fb_helper->fb; 1042 struct drm_framebuffer *fb = fb_helper->fb;
1045 int pindex;
1046 1043
1047 if (info->fix.visual == FB_VISUAL_TRUECOLOR) { 1044 if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
1048 u32 *palette; 1045 u32 *palette;
@@ -1074,38 +1071,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
1074 !fb_helper->funcs->gamma_get)) 1071 !fb_helper->funcs->gamma_get))
1075 return -EINVAL; 1072 return -EINVAL;
1076 1073
1077 pindex = regno; 1074 WARN_ON(fb->bits_per_pixel != 8);
1078
1079 if (fb->bits_per_pixel == 16) {
1080 pindex = regno << 3;
1081
1082 if (fb->depth == 16 && regno > 63)
1083 return -EINVAL;
1084 if (fb->depth == 15 && regno > 31)
1085 return -EINVAL;
1086
1087 if (fb->depth == 16) {
1088 u16 r, g, b;
1089 int i;
1090 if (regno < 32) {
1091 for (i = 0; i < 8; i++)
1092 fb_helper->funcs->gamma_set(crtc, red,
1093 green, blue, pindex + i);
1094 }
1095 1075
1096 fb_helper->funcs->gamma_get(crtc, &r, 1076 fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
1097 &g, &b,
1098 pindex >> 1);
1099 1077
1100 for (i = 0; i < 4; i++)
1101 fb_helper->funcs->gamma_set(crtc, r,
1102 green, b,
1103 (pindex >> 1) + i);
1104 }
1105 }
1106
1107 if (fb->depth != 16)
1108 fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
1109 return 0; 1078 return 0;
1110} 1079}
1111 1080
@@ -1373,7 +1342,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
1373 return -EBUSY; 1342 return -EBUSY;
1374 } 1343 }
1375 1344
1376 if (fb_helper->atomic) { 1345 if (dev->mode_config.funcs->atomic_commit) {
1377 ret = pan_display_atomic(var, info); 1346 ret = pan_display_atomic(var, info);
1378 goto unlock; 1347 goto unlock;
1379 } 1348 }
@@ -2000,7 +1969,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
2000 my_score++; 1969 my_score++;
2001 1970
2002 connector_funcs = connector->helper_private; 1971 connector_funcs = connector->helper_private;
2003 encoder = connector_funcs->best_encoder(connector); 1972
1973 /*
1974 * If the DRM device implements atomic hooks and ->best_encoder() is
1975 * NULL we fallback to the default drm_atomic_helper_best_encoder()
1976 * helper.
1977 */
1978 if (fb_helper->dev->mode_config.funcs->atomic_commit &&
1979 !connector_funcs->best_encoder)
1980 encoder = drm_atomic_helper_best_encoder(connector);
1981 else
1982 encoder = connector_funcs->best_encoder(connector);
1983
2004 if (!encoder) 1984 if (!encoder)
2005 goto out; 1985 goto out;
2006 1986
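
Given the fallback added above, an atomic driver (one with mode_config.funcs->atomic_commit set) can drop its ->best_encoder() boilerplate entirely, which is exactly what the exynos_dpi hunk later in this series does. A hedged sketch, with foo_* names hypothetical:

    #include <drm/drm_crtc_helper.h>

    static int foo_connector_get_modes(struct drm_connector *connector);

    /* Leaving .best_encoder NULL makes the fb-helper fall back to
     * drm_atomic_helper_best_encoder(), which returns the first encoder
     * attached to the connector. */
    static const struct drm_connector_helper_funcs foo_connector_helpers = {
            .get_modes = foo_connector_get_modes,
            /* .best_encoder intentionally omitted */
    };
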
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7af7f8bcb355..a27bc7cda975 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -67,7 +67,7 @@ DEFINE_MUTEX(drm_global_mutex);
67 * specific implementations. For GEM-based drivers this is drm_gem_mmap(). 67 * specific implementations. For GEM-based drivers this is drm_gem_mmap().
68 * 68 *
69 * No other file operations are supported by the DRM userspace API. Overall the 69 * No other file operations are supported by the DRM userspace API. Overall the
70 * following is an example #file_operations structure: 70 * following is an example #file_operations structure::
71 * 71 *
72 * static const example_drm_fops = { 72 * static const example_drm_fops = {
73 * .owner = THIS_MODULE, 73 * .owner = THIS_MODULE,
@@ -368,7 +368,7 @@ static void drm_events_release(struct drm_file *file_priv)
368 /* Remove unconsumed events */ 368 /* Remove unconsumed events */
369 list_for_each_entry_safe(e, et, &file_priv->event_list, link) { 369 list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
370 list_del(&e->link); 370 list_del(&e->link);
371 e->destroy(e); 371 kfree(e);
372 } 372 }
373 373
374 spin_unlock_irqrestore(&dev->event_lock, flags); 374 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -636,7 +636,7 @@ put_back_event:
636 } 636 }
637 637
638 ret += length; 638 ret += length;
639 e->destroy(e); 639 kfree(e);
640 } 640 }
641 } 641 }
642 mutex_unlock(&file_priv->event_read_lock); 642 mutex_unlock(&file_priv->event_read_lock);
@@ -713,9 +713,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev,
713 list_add(&p->pending_link, &file_priv->pending_event_list); 713 list_add(&p->pending_link, &file_priv->pending_event_list);
714 p->file_priv = file_priv; 714 p->file_priv = file_priv;
715 715
716 /* we *could* pass this in as arg, but everyone uses kfree: */
717 p->destroy = (void (*) (struct drm_pending_event *)) kfree;
718
719 return 0; 716 return 0;
720} 717}
721EXPORT_SYMBOL(drm_event_reserve_init_locked); 718EXPORT_SYMBOL(drm_event_reserve_init_locked);
@@ -778,7 +775,7 @@ void drm_event_cancel_free(struct drm_device *dev,
778 list_del(&p->pending_link); 775 list_del(&p->pending_link);
779 } 776 }
780 spin_unlock_irqrestore(&dev->event_lock, flags); 777 spin_unlock_irqrestore(&dev->event_lock, flags);
781 p->destroy(p); 778 kfree(p);
782} 779}
783EXPORT_SYMBOL(drm_event_cancel_free); 780EXPORT_SYMBOL(drm_event_cancel_free);
784 781
@@ -800,8 +797,19 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
800{ 797{
801 assert_spin_locked(&dev->event_lock); 798 assert_spin_locked(&dev->event_lock);
802 799
800 if (e->completion) {
801 /* ->completion might disappear as soon as it signalled. */
802 complete_all(e->completion);
803 e->completion = NULL;
804 }
805
806 if (e->fence) {
807 fence_signal(e->fence);
808 fence_put(e->fence);
809 }
810
803 if (!e->file_priv) { 811 if (!e->file_priv) {
804 e->destroy(e); 812 kfree(e);
805 return; 813 return;
806 } 814 }
807 815
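
With the ->destroy() hook gone, every pending event must come from the kmalloc family, since the core now frees it with a plain kfree(). A hedged sketch of the resulting allocation pattern (foo_* names hypothetical):

    #include <linux/slab.h>
    #include <drm/drmP.h>

    struct foo_pending_event {
            struct drm_pending_event base;
            struct drm_event_vblank event;
    };

    static int foo_queue_event(struct drm_device *dev, struct drm_file *file_priv)
    {
            struct foo_pending_event *e;
            int ret;

            e = kzalloc(sizeof(*e), GFP_KERNEL);    /* must be kfree()-able */
            if (!e)
                    return -ENOMEM;

            /* length must be set before reserving event space */
            e->event.base.type = DRM_EVENT_VBLANK;
            e->event.base.length = sizeof(e->event);

            ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
            if (ret) {
                    kfree(e);
                    return ret;
            }

            /* the core kfree()s the event once userspace has read it */
            drm_send_event(dev, &e->base);
            return 0;
    }
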
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
new file mode 100644
index 000000000000..0645c85d5f95
--- /dev/null
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -0,0 +1,320 @@
1/*
2 * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
3 *
4 * DRM core format related functions
5 *
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that copyright
9 * notice and this permission notice appear in supporting documentation, and
10 * that the name of the copyright holders not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. The copyright holders make no representations
13 * about the suitability of this software for any purpose. It is provided "as
14 * is" without express or implied warranty.
15 *
16 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
17 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
18 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
19 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
20 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
21 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 * OF THIS SOFTWARE.
23 */
24
25#include <linux/bug.h>
26#include <linux/ctype.h>
27#include <linux/export.h>
28#include <linux/kernel.h>
29
30#include <drm/drmP.h>
31#include <drm/drm_fourcc.h>
32
33static char printable_char(int c)
34{
35 return isascii(c) && isprint(c) ? c : '?';
36}
37
38/**
39 * drm_get_format_name - return a string for drm fourcc format
40 * @format: format to compute name of
41 *
42 * Note that the buffer used by this function is globally shared and owned by
43 * the function itself.
44 *
45 * FIXME: This isn't really multithreading safe.
46 */
47const char *drm_get_format_name(uint32_t format)
48{
49 static char buf[32];
50
51 snprintf(buf, sizeof(buf),
52 "%c%c%c%c %s-endian (0x%08x)",
53 printable_char(format & 0xff),
54 printable_char((format >> 8) & 0xff),
55 printable_char((format >> 16) & 0xff),
56 printable_char((format >> 24) & 0x7f),
57 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
58 format);
59
60 return buf;
61}
62EXPORT_SYMBOL(drm_get_format_name);
63
64/**
65 * drm_fb_get_bpp_depth - get the bpp/depth values for format
66 * @format: pixel format (DRM_FORMAT_*)
67 * @depth: storage for the depth value
68 * @bpp: storage for the bpp value
69 *
70 * This only supports RGB formats here for compat with code that doesn't use
71 * pixel formats directly yet.
72 */
73void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
74 int *bpp)
75{
76 switch (format) {
77 case DRM_FORMAT_C8:
78 case DRM_FORMAT_RGB332:
79 case DRM_FORMAT_BGR233:
80 *depth = 8;
81 *bpp = 8;
82 break;
83 case DRM_FORMAT_XRGB1555:
84 case DRM_FORMAT_XBGR1555:
85 case DRM_FORMAT_RGBX5551:
86 case DRM_FORMAT_BGRX5551:
87 case DRM_FORMAT_ARGB1555:
88 case DRM_FORMAT_ABGR1555:
89 case DRM_FORMAT_RGBA5551:
90 case DRM_FORMAT_BGRA5551:
91 *depth = 15;
92 *bpp = 16;
93 break;
94 case DRM_FORMAT_RGB565:
95 case DRM_FORMAT_BGR565:
96 *depth = 16;
97 *bpp = 16;
98 break;
99 case DRM_FORMAT_RGB888:
100 case DRM_FORMAT_BGR888:
101 *depth = 24;
102 *bpp = 24;
103 break;
104 case DRM_FORMAT_XRGB8888:
105 case DRM_FORMAT_XBGR8888:
106 case DRM_FORMAT_RGBX8888:
107 case DRM_FORMAT_BGRX8888:
108 *depth = 24;
109 *bpp = 32;
110 break;
111 case DRM_FORMAT_XRGB2101010:
112 case DRM_FORMAT_XBGR2101010:
113 case DRM_FORMAT_RGBX1010102:
114 case DRM_FORMAT_BGRX1010102:
115 case DRM_FORMAT_ARGB2101010:
116 case DRM_FORMAT_ABGR2101010:
117 case DRM_FORMAT_RGBA1010102:
118 case DRM_FORMAT_BGRA1010102:
119 *depth = 30;
120 *bpp = 32;
121 break;
122 case DRM_FORMAT_ARGB8888:
123 case DRM_FORMAT_ABGR8888:
124 case DRM_FORMAT_RGBA8888:
125 case DRM_FORMAT_BGRA8888:
126 *depth = 32;
127 *bpp = 32;
128 break;
129 default:
130 DRM_DEBUG_KMS("unsupported pixel format %s\n",
131 drm_get_format_name(format));
132 *depth = 0;
133 *bpp = 0;
134 break;
135 }
136}
137EXPORT_SYMBOL(drm_fb_get_bpp_depth);
138
139/**
140 * drm_format_num_planes - get the number of planes for format
141 * @format: pixel format (DRM_FORMAT_*)
142 *
143 * Returns:
144 * The number of planes used by the specified pixel format.
145 */
146int drm_format_num_planes(uint32_t format)
147{
148 switch (format) {
149 case DRM_FORMAT_YUV410:
150 case DRM_FORMAT_YVU410:
151 case DRM_FORMAT_YUV411:
152 case DRM_FORMAT_YVU411:
153 case DRM_FORMAT_YUV420:
154 case DRM_FORMAT_YVU420:
155 case DRM_FORMAT_YUV422:
156 case DRM_FORMAT_YVU422:
157 case DRM_FORMAT_YUV444:
158 case DRM_FORMAT_YVU444:
159 return 3;
160 case DRM_FORMAT_NV12:
161 case DRM_FORMAT_NV21:
162 case DRM_FORMAT_NV16:
163 case DRM_FORMAT_NV61:
164 case DRM_FORMAT_NV24:
165 case DRM_FORMAT_NV42:
166 return 2;
167 default:
168 return 1;
169 }
170}
171EXPORT_SYMBOL(drm_format_num_planes);
172
173/**
174 * drm_format_plane_cpp - determine the bytes per pixel value
175 * @format: pixel format (DRM_FORMAT_*)
176 * @plane: plane index
177 *
178 * Returns:
179 * The bytes per pixel value for the specified plane.
180 */
181int drm_format_plane_cpp(uint32_t format, int plane)
182{
183 unsigned int depth;
184 int bpp;
185
186 if (plane >= drm_format_num_planes(format))
187 return 0;
188
189 switch (format) {
190 case DRM_FORMAT_YUYV:
191 case DRM_FORMAT_YVYU:
192 case DRM_FORMAT_UYVY:
193 case DRM_FORMAT_VYUY:
194 return 2;
195 case DRM_FORMAT_NV12:
196 case DRM_FORMAT_NV21:
197 case DRM_FORMAT_NV16:
198 case DRM_FORMAT_NV61:
199 case DRM_FORMAT_NV24:
200 case DRM_FORMAT_NV42:
201 return plane ? 2 : 1;
202 case DRM_FORMAT_YUV410:
203 case DRM_FORMAT_YVU410:
204 case DRM_FORMAT_YUV411:
205 case DRM_FORMAT_YVU411:
206 case DRM_FORMAT_YUV420:
207 case DRM_FORMAT_YVU420:
208 case DRM_FORMAT_YUV422:
209 case DRM_FORMAT_YVU422:
210 case DRM_FORMAT_YUV444:
211 case DRM_FORMAT_YVU444:
212 return 1;
213 default:
214 drm_fb_get_bpp_depth(format, &depth, &bpp);
215 return bpp >> 3;
216 }
217}
218EXPORT_SYMBOL(drm_format_plane_cpp);
219
220/**
221 * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
222 * @format: pixel format (DRM_FORMAT_*)
223 *
224 * Returns:
225 * The horizontal chroma subsampling factor for the
226 * specified pixel format.
227 */
228int drm_format_horz_chroma_subsampling(uint32_t format)
229{
230 switch (format) {
231 case DRM_FORMAT_YUV411:
232 case DRM_FORMAT_YVU411:
233 case DRM_FORMAT_YUV410:
234 case DRM_FORMAT_YVU410:
235 return 4;
236 case DRM_FORMAT_YUYV:
237 case DRM_FORMAT_YVYU:
238 case DRM_FORMAT_UYVY:
239 case DRM_FORMAT_VYUY:
240 case DRM_FORMAT_NV12:
241 case DRM_FORMAT_NV21:
242 case DRM_FORMAT_NV16:
243 case DRM_FORMAT_NV61:
244 case DRM_FORMAT_YUV422:
245 case DRM_FORMAT_YVU422:
246 case DRM_FORMAT_YUV420:
247 case DRM_FORMAT_YVU420:
248 return 2;
249 default:
250 return 1;
251 }
252}
253EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
254
255/**
256 * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
257 * @format: pixel format (DRM_FORMAT_*)
258 *
259 * Returns:
260 * The vertical chroma subsampling factor for the
261 * specified pixel format.
262 */
263int drm_format_vert_chroma_subsampling(uint32_t format)
264{
265 switch (format) {
266 case DRM_FORMAT_YUV410:
267 case DRM_FORMAT_YVU410:
268 return 4;
269 case DRM_FORMAT_YUV420:
270 case DRM_FORMAT_YVU420:
271 case DRM_FORMAT_NV12:
272 case DRM_FORMAT_NV21:
273 return 2;
274 default:
275 return 1;
276 }
277}
278EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
279
280/**
281 * drm_format_plane_width - width of the plane given the first plane
282 * @width: width of the first plane
283 * @format: pixel format
284 * @plane: plane index
285 *
286 * Returns:
287 * The width of @plane, given that the width of the first plane is @width.
288 */
289int drm_format_plane_width(int width, uint32_t format, int plane)
290{
291 if (plane >= drm_format_num_planes(format))
292 return 0;
293
294 if (plane == 0)
295 return width;
296
297 return width / drm_format_horz_chroma_subsampling(format);
298}
299EXPORT_SYMBOL(drm_format_plane_width);
300
301/**
302 * drm_format_plane_height - height of the plane given the first plane
303 * @height: height of the first plane
304 * @format: pixel format
305 * @plane: plane index
306 *
307 * Returns:
308 * The height of @plane, given that the height of the first plane is @height.
309 */
310int drm_format_plane_height(int height, uint32_t format, int plane)
311{
312 if (plane >= drm_format_num_planes(format))
313 return 0;
314
315 if (plane == 0)
316 return height;
317
318 return height / drm_format_vert_chroma_subsampling(format);
319}
320EXPORT_SYMBOL(drm_format_plane_height);
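
Taken together, the helpers collected in this new file let buffer-size logic stay format-agnostic; a small sketch (foo_buffer_size is hypothetical):

    #include <drm/drm_fourcc.h>

    static size_t foo_buffer_size(uint32_t format, int width, int height)
    {
            size_t size = 0;
            int i;

            for (i = 0; i < drm_format_num_planes(format); i++) {
                    int w = drm_format_plane_width(width, format, i);
                    int h = drm_format_plane_height(height, format, i);

                    size += (size_t)w * h * drm_format_plane_cpp(format, i);
            }

            return size;
    }

    /* e.g. foo_buffer_size(DRM_FORMAT_NV12, 1920, 1080):
     * plane 0: 1920 * 1080 * 1 = 2073600 bytes
     * plane 1:  960 *  540 * 2 = 1036800 bytes  ->  3110400 total */
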
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 32156060b9c9..5c19dde1cd31 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release);
787 * @kref: kref of the object to free 787 * @kref: kref of the object to free
788 * 788 *
789 * Called after the last reference to the object has been lost. 789 * Called after the last reference to the object has been lost.
790 * Must be called holding struct_ mutex 790 * Must be called holding &drm_device->struct_mutex.
791 * 791 *
792 * Frees the object 792 * Frees the object
793 */ 793 */
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e1ab008b3f08..1d6c335584ec 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
121 return cma_obj; 121 return cma_obj;
122 122
123error: 123error:
124 drm->driver->gem_free_object(&cma_obj->base); 124 drm_gem_object_unreference_unlocked(&cma_obj->base);
125 return ERR_PTR(ret); 125 return ERR_PTR(ret);
126} 126}
127EXPORT_SYMBOL_GPL(drm_gem_cma_create); 127EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
162 * and handle has the id what user can see. 162 * and handle has the id what user can see.
163 */ 163 */
164 ret = drm_gem_handle_create(file_priv, gem_obj, handle); 164 ret = drm_gem_handle_create(file_priv, gem_obj, handle);
165 if (ret)
166 goto err_handle_create;
167
168 /* drop reference from allocate - handle holds it now. */ 165 /* drop reference from allocate - handle holds it now. */
169 drm_gem_object_unreference_unlocked(gem_obj); 166 drm_gem_object_unreference_unlocked(gem_obj);
167 if (ret)
168 return ERR_PTR(ret);
170 169
171 return cma_obj; 170 return cma_obj;
172
173err_handle_create:
174 drm->driver->gem_free_object(gem_obj);
175
176 return ERR_PTR(ret);
177} 171}
178 172
179/** 173/**
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0fac801c18fe..76e39c50c90c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -42,10 +42,6 @@
42#include <linux/vgaarb.h> 42#include <linux/vgaarb.h>
43#include <linux/export.h> 43#include <linux/export.h>
44 44
45/* Access macro for slots in vblank timestamp ringbuffer. */
46#define vblanktimestamp(dev, pipe, count) \
47 ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
48
49/* Retry timestamp calculation up to 3 times to satisfy 45/* Retry timestamp calculation up to 3 times to satisfy
50 * drm_timestamp_precision before giving up. 46 * drm_timestamp_precision before giving up.
51 */ 47 */
@@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
82 struct timeval *t_vblank, u32 last) 78 struct timeval *t_vblank, u32 last)
83{ 79{
84 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 80 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
85 u32 tslot;
86 81
87 assert_spin_locked(&dev->vblank_time_lock); 82 assert_spin_locked(&dev->vblank_time_lock);
88 83
89 vblank->last = last; 84 vblank->last = last;
90 85
91 /* All writers hold the spinlock, but readers are serialized by 86 write_seqlock(&vblank->seqlock);
92 * the latching of vblank->count below. 87 vblank->time = *t_vblank;
93 */
94 tslot = vblank->count + vblank_count_inc;
95 vblanktimestamp(dev, pipe, tslot) = *t_vblank;
96
97 /*
98 * vblank timestamp updates are protected on the write side with
99 * vblank_time_lock, but on the read side done locklessly using a
100 * sequence-lock on the vblank counter. Ensure correct ordering using
101 * memory barrriers. We need the barrier both before and also after the
102 * counter update to synchronize with the next timestamp write.
103 * The read-side barriers for this are in drm_vblank_count_and_time.
104 */
105 smp_wmb();
106 vblank->count += vblank_count_inc; 88 vblank->count += vblank_count_inc;
107 smp_wmb(); 89 write_sequnlock(&vblank->seqlock);
108} 90}
109 91
110/** 92/*
111 * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
112 * @dev: DRM device
113 * @pipe: index of CRTC for which to reset the timestamp
114 *
115 * Reset the stored timestamp for the current vblank count to correspond 93 * Reset the stored timestamp for the current vblank count to correspond
116 * to the last vblank occurred. 94 * to the last vblank occurred.
117 * 95 *
@@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
155 spin_unlock(&dev->vblank_time_lock); 133 spin_unlock(&dev->vblank_time_lock);
156} 134}
157 135
158/** 136/*
159 * drm_update_vblank_count - update the master vblank counter
160 * @dev: DRM device
161 * @pipe: counter to update
162 *
163 * Call back into the driver to update the appropriate vblank counter 137 * Call back into the driver to update the appropriate vblank counter
164 * (specified by @pipe). Deal with wraparound, if it occurred, and 138 * (specified by @pipe). Deal with wraparound, if it occurred, and
165 * update the last read value so we can deal with wraparound on the next 139 * update the last read value so we can deal with wraparound on the next
@@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
205 const struct timeval *t_old; 179 const struct timeval *t_old;
206 u64 diff_ns; 180 u64 diff_ns;
207 181
208 t_old = &vblanktimestamp(dev, pipe, vblank->count); 182 t_old = &vblank->time;
209 diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old); 183 diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
210 184
211 /* 185 /*
@@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
239 diff = 1; 213 diff = 1;
240 } 214 }
241 215
242 /*
243 * FIMXE: Need to replace this hack with proper seqlocks.
244 *
245 * Restrict the bump of the software vblank counter to a safe maximum
246 * value of +1 whenever there is the possibility that concurrent readers
247 * of vblank timestamps could be active at the moment, as the current
248 * implementation of the timestamp caching and updating is not safe
249 * against concurrent readers for calls to store_vblank() with a bump
250 * of anything but +1. A bump != 1 would very likely return corrupted
251 * timestamps to userspace, because the same slot in the cache could
252 * be concurrently written by store_vblank() and read by one of those
253 * readers without the read-retry logic detecting the collision.
254 *
255 * Concurrent readers can exist when we are called from the
256 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
257 * irq callers. However, all those calls to us are happening with the
258 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
259 * can't increase while we are executing. Therefore a zero refcount at
260 * this point is safe for arbitrary counter bumps if we are called
261 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
262 * we must also accept a refcount of 1, as whenever we are called from
263 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
264 * we must let that one pass through in order to not lose vblank counts
265 * during vblank irq off - which would completely defeat the whole
266 * point of this routine.
267 *
268 * Whenever we are called from vblank irq, we have to assume concurrent
269 * readers exist or can show up any time during our execution, even if
270 * the refcount is currently zero, as vblank irqs are usually only
271 * enabled due to the presence of readers, and because when we are called
272 * from vblank irq we can't hold the vbl_lock to protect us from sudden
273 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
274 * called from vblank irq.
275 */
276 if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
277 (flags & DRM_CALLED_FROM_VBLIRQ))) {
278 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
279 "refcount %u, vblirq %u\n", pipe, diff,
280 atomic_read(&vblank->refcount),
281 (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
282 diff = 1;
283 }
284
285 DRM_DEBUG_VBL("updating vblank count on crtc %u:" 216 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
286 " current=%u, diff=%u, hw=%u hw_last=%u\n", 217 " current=%u, diff=%u, hw=%u hw_last=%u\n",
287 pipe, vblank->count, diff, cur_vblank, vblank->last); 218 pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
303 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); 234 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
304} 235}
305 236
237/**
238 * drm_accurate_vblank_count - retrieve the master vblank counter
239 * @crtc: which counter to retrieve
240 *
241 * This function is similar to drm_crtc_vblank_count() but this
242 * function interpolates to handle a race with vblank IRQs.
243 *
244 * This is mostly useful for hardware that can obtain the scanout
245 * position, but doesn't have a frame counter.
246 */
247u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
248{
249 struct drm_device *dev = crtc->dev;
250 unsigned int pipe = drm_crtc_index(crtc);
251 u32 vblank;
252 unsigned long flags;
253
254 WARN(!dev->driver->get_vblank_timestamp,
255 "This function requires support for accurate vblank timestamps.");
256
257 spin_lock_irqsave(&dev->vblank_time_lock, flags);
258
259 drm_update_vblank_count(dev, pipe, 0);
260 vblank = drm_vblank_count(dev, pipe);
261
262 spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
263
264 return vblank;
265}
266EXPORT_SYMBOL(drm_accurate_vblank_count);
267
306/* 268/*
307 * Disable vblank irq's on crtc, make sure that last vblank count 269 * Disable vblank irq's on crtc, make sure that last vblank count
308 * of hardware and corresponding consistent software vblank counter 270 * of hardware and corresponding consistent software vblank counter
@@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
417 init_waitqueue_head(&vblank->queue); 379 init_waitqueue_head(&vblank->queue);
418 setup_timer(&vblank->disable_timer, vblank_disable_fn, 380 setup_timer(&vblank->disable_timer, vblank_disable_fn,
419 (unsigned long)vblank); 381 (unsigned long)vblank);
382 seqlock_init(&vblank->seqlock);
420 } 383 }
421 384
422 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); 385 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -986,25 +949,19 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
986 struct timeval *vblanktime) 949 struct timeval *vblanktime)
987{ 950{
988 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 951 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
989 int count = DRM_TIMESTAMP_MAXRETRIES; 952 u32 vblank_count;
990 u32 cur_vblank; 953 unsigned int seq;
991 954
992 if (WARN_ON(pipe >= dev->num_crtcs)) 955 if (WARN_ON(pipe >= dev->num_crtcs))
993 return 0; 956 return 0;
994 957
995 /*
996 * Vblank timestamps are read lockless. To ensure consistency the vblank
997 * counter is rechecked and ordering is ensured using memory barriers.
998 * This works like a seqlock. The write-side barriers are in store_vblank.
999 */
1000 do { 958 do {
1001 cur_vblank = vblank->count; 959 seq = read_seqbegin(&vblank->seqlock);
1002 smp_rmb(); 960 vblank_count = vblank->count;
1003 *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); 961 *vblanktime = vblank->time;
1004 smp_rmb(); 962 } while (read_seqretry(&vblank->seqlock, seq));
1005 } while (cur_vblank != vblank->count && --count > 0);
1006 963
1007 return cur_vblank; 964 return vblank_count;
1008} 965}
1009EXPORT_SYMBOL(drm_vblank_count_and_time); 966EXPORT_SYMBOL(drm_vblank_count_and_time);
1010 967
@@ -1044,34 +1001,6 @@ static void send_vblank_event(struct drm_device *dev,
1044} 1001}
1045 1002
1046/** 1003/**
1047 * drm_arm_vblank_event - arm vblank event after pageflip
1048 * @dev: DRM device
1049 * @pipe: CRTC index
1050 * @e: the event to prepare to send
1051 *
1052 * A lot of drivers need to generate vblank events for the very next vblank
1053 * interrupt. For example when the page flip interrupt happens when the page
1054 * flip gets armed, but not when it actually executes within the next vblank
1055 * period. This helper function implements exactly the required vblank arming
1056 * behaviour.
1057 *
1058 * Caller must hold event lock. Caller must also hold a vblank reference for
1059 * the event @e, which will be dropped when the next vblank arrives.
1060 *
1061 * This is the legacy version of drm_crtc_arm_vblank_event().
1062 */
1063void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
1064 struct drm_pending_vblank_event *e)
1065{
1066 assert_spin_locked(&dev->event_lock);
1067
1068 e->pipe = pipe;
1069 e->event.sequence = drm_vblank_count(dev, pipe);
1070 list_add_tail(&e->base.link, &dev->vblank_event_list);
1071}
1072EXPORT_SYMBOL(drm_arm_vblank_event);
1073
1074/**
1075 * drm_crtc_arm_vblank_event - arm vblank event after pageflip 1004 * drm_crtc_arm_vblank_event - arm vblank event after pageflip
1076 * @crtc: the source CRTC of the vblank event 1005 * @crtc: the source CRTC of the vblank event
1077 * @e: the event to send 1006 * @e: the event to send
@@ -1084,32 +1013,35 @@ EXPORT_SYMBOL(drm_arm_vblank_event);
1084 * 1013 *
1085 * Caller must hold event lock. Caller must also hold a vblank reference for 1014 * Caller must hold event lock. Caller must also hold a vblank reference for
1086 * the event @e, which will be dropped when the next vblank arrives. 1015 * the event @e, which will be dropped when the next vblank arrives.
1087 *
1088 * This is the native KMS version of drm_arm_vblank_event().
1089 */ 1016 */
1090void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, 1017void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
1091 struct drm_pending_vblank_event *e) 1018 struct drm_pending_vblank_event *e)
1092{ 1019{
1093 drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); 1020 struct drm_device *dev = crtc->dev;
1021 unsigned int pipe = drm_crtc_index(crtc);
1022
1023 assert_spin_locked(&dev->event_lock);
1024
1025 e->pipe = pipe;
1026 e->event.sequence = drm_vblank_count(dev, pipe);
1027 list_add_tail(&e->base.link, &dev->vblank_event_list);
1094} 1028}
1095EXPORT_SYMBOL(drm_crtc_arm_vblank_event); 1029EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
1096 1030
1097/** 1031/**
1098 * drm_send_vblank_event - helper to send vblank event after pageflip 1032 * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
1099 * @dev: DRM device 1033 * @crtc: the source CRTC of the vblank event
1100 * @pipe: CRTC index
1101 * @e: the event to send 1034 * @e: the event to send
1102 * 1035 *
1103 * Updates sequence # and timestamp on event, and sends it to userspace. 1036 * Updates sequence # and timestamp on event, and sends it to userspace.
1104 * Caller must hold event lock. 1037 * Caller must hold event lock.
1105 *
1106 * This is the legacy version of drm_crtc_send_vblank_event().
1107 */ 1038 */
1108void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, 1039void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
1109 struct drm_pending_vblank_event *e) 1040 struct drm_pending_vblank_event *e)
1110{ 1041{
1042 struct drm_device *dev = crtc->dev;
1043 unsigned int seq, pipe = drm_crtc_index(crtc);
1111 struct timeval now; 1044 struct timeval now;
1112 unsigned int seq;
1113 1045
1114 if (dev->num_crtcs > 0) { 1046 if (dev->num_crtcs > 0) {
1115 seq = drm_vblank_count_and_time(dev, pipe, &now); 1047 seq = drm_vblank_count_and_time(dev, pipe, &now);
@@ -1121,23 +1053,6 @@ void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
1121 e->pipe = pipe; 1053 e->pipe = pipe;
1122 send_vblank_event(dev, e, seq, &now); 1054 send_vblank_event(dev, e, seq, &now);
1123} 1055}
1124EXPORT_SYMBOL(drm_send_vblank_event);
1125
1126/**
1127 * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
1128 * @crtc: the source CRTC of the vblank event
1129 * @e: the event to send
1130 *
1131 * Updates sequence # and timestamp on event, and sends it to userspace.
1132 * Caller must hold event lock.
1133 *
1134 * This is the native KMS version of drm_send_vblank_event().
1135 */
1136void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
1137 struct drm_pending_vblank_event *e)
1138{
1139 drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
1140}
1141EXPORT_SYMBOL(drm_crtc_send_vblank_event); 1056EXPORT_SYMBOL(drm_crtc_send_vblank_event);
1142 1057
1143/** 1058/**
@@ -1193,7 +1108,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
1193 * Returns: 1108 * Returns:
1194 * Zero on success or a negative error code on failure. 1109 * Zero on success or a negative error code on failure.
1195 */ 1110 */
1196int drm_vblank_get(struct drm_device *dev, unsigned int pipe) 1111static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
1197{ 1112{
1198 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1113 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1199 unsigned long irqflags; 1114 unsigned long irqflags;
@@ -1219,7 +1134,6 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
1219 1134
1220 return ret; 1135 return ret;
1221} 1136}
1222EXPORT_SYMBOL(drm_vblank_get);
1223 1137
1224/** 1138/**
1225 * drm_crtc_vblank_get - get a reference count on vblank events 1139 * drm_crtc_vblank_get - get a reference count on vblank events
@@ -1228,8 +1142,6 @@ EXPORT_SYMBOL(drm_vblank_get);
1228 * Acquire a reference count on vblank events to avoid having them disabled 1142 * Acquire a reference count on vblank events to avoid having them disabled
1229 * while in use. 1143 * while in use.
1230 * 1144 *
1231 * This is the native kms version of drm_vblank_get().
1232 *
1233 * Returns: 1145 * Returns:
1234 * Zero on success or a negative error code on failure. 1146 * Zero on success or a negative error code on failure.
1235 */ 1147 */
@@ -1249,7 +1161,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
1249 * 1161 *
1250 * This is the legacy version of drm_crtc_vblank_put(). 1162 * This is the legacy version of drm_crtc_vblank_put().
1251 */ 1163 */
1252void drm_vblank_put(struct drm_device *dev, unsigned int pipe) 1164static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
1253{ 1165{
1254 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1166 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1255 1167
@@ -1270,7 +1182,6 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
1270 jiffies + ((drm_vblank_offdelay * HZ)/1000)); 1182 jiffies + ((drm_vblank_offdelay * HZ)/1000));
1271 } 1183 }
1272} 1184}
1273EXPORT_SYMBOL(drm_vblank_put);
1274 1185
1275/** 1186/**
1276 * drm_crtc_vblank_put - give up ownership of vblank events 1187 * drm_crtc_vblank_put - give up ownership of vblank events
@@ -1278,8 +1189,6 @@ EXPORT_SYMBOL(drm_vblank_put);
1278 * 1189 *
1279 * Release ownership of a given vblank counter, turning off interrupts 1190 * Release ownership of a given vblank counter, turning off interrupts
1280 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. 1191 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
1281 *
1282 * This is the native kms version of drm_vblank_put().
1283 */ 1192 */
1284void drm_crtc_vblank_put(struct drm_crtc *crtc) 1193void drm_crtc_vblank_put(struct drm_crtc *crtc)
1285{ 1194{
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index f5d80839a90c..49311fc61d5d 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -60,6 +60,21 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
60 return 0; 60 return 0;
61} 61}
62 62
63static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env)
64{
65 struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
66 int err;
67
68 err = of_device_uevent_modalias(dev, env);
69 if (err != -ENODEV)
70 return err;
71
72 add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX,
73 dsi->name);
74
75 return 0;
76}
77
63static const struct dev_pm_ops mipi_dsi_device_pm_ops = { 78static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
64 .runtime_suspend = pm_generic_runtime_suspend, 79 .runtime_suspend = pm_generic_runtime_suspend,
65 .runtime_resume = pm_generic_runtime_resume, 80 .runtime_resume = pm_generic_runtime_resume,
@@ -74,6 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
74static struct bus_type mipi_dsi_bus_type = { 89static struct bus_type mipi_dsi_bus_type = {
75 .name = "mipi-dsi", 90 .name = "mipi-dsi",
76 .match = mipi_dsi_device_match, 91 .match = mipi_dsi_device_match,
92 .uevent = mipi_dsi_uevent,
77 .pm = &mipi_dsi_device_pm_ops, 93 .pm = &mipi_dsi_device_pm_ops,
78}; 94};
79 95
@@ -983,6 +999,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
983EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on); 999EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
984 1000
985/** 1001/**
1002 * mipi_dsi_set_tear_scanline() - turn on the display module's Tearing Effect
1003 * output signal on the TE signal line when the display module reaches
1004 * line N, as defined by STS[10:0].
1005 * @dsi: DSI peripheral device
1006 * @param: STS[10:0], the scanline at which to assert the TE signal
1007 * Return: 0 on success or a negative error code on failure
1008 */
1009int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param)
1010{
1011 u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, param >> 8,
1012 param & 0xff };
1013 ssize_t err;
1014
1015 err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
1016 if (err < 0)
1017 return err;
1018
1019 return 0;
1020}
1021EXPORT_SYMBOL(mipi_dsi_set_tear_scanline);
1022
1023/**
986 * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image 1024 * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
987 * data used by the interface 1025 * data used by the interface
988 * @dsi: DSI peripheral device 1026 * @dsi: DSI peripheral device
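
A hedged usage sketch for the new DCS helper, pairing it with the pre-existing mipi_dsi_dcs_set_tear_on(); the scanline value is panel-specific and purely illustrative:

    #include <drm/drm_mipi_dsi.h>

    static int foo_panel_enable_te(struct mipi_dsi_device *dsi)
    {
            int ret;

            ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
            if (ret < 0)
                    return ret;

            /* assert TE when the panel reaches scanline 0x300 */
            return mipi_dsi_set_tear_scanline(dsi, 0x300);
    }
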
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04de6fd88f8c..cb39f45d6a16 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
179int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) 179int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
180{ 180{
181 struct drm_mm_node *hole; 181 struct drm_mm_node *hole;
182 u64 end = node->start + node->size; 182 u64 end;
183 u64 hole_start; 183 u64 hole_start;
184 u64 hole_end; 184 u64 hole_end;
185 185
186 BUG_ON(node == NULL); 186 BUG_ON(node == NULL);
187 187
188 end = node->start + node->size;
189
188 /* Find the relevant hole to add our node to */ 190 /* Find the relevant hole to add our node to */
189 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { 191 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
190 if (hole_start > node->start || hole_end < end) 192 if (hole_start > node->start || hole_end < end)
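
For reference, drm_mm_reserve_node() is the entry point for pinning down a caller-chosen range, e.g. a firmware-initialized scanout buffer that must not be reallocated; a hedged sketch (foo_* names hypothetical):

    #include <linux/string.h>
    #include <drm/drm_mm.h>

    static int foo_reserve_firmware_fb(struct drm_mm *mm,
                                       struct drm_mm_node *node,
                                       u64 fb_base, u64 fb_size)
    {
            memset(node, 0, sizeof(*node));
            node->start = fb_base;
            node->size = fb_size;

            /* fails with -ENOSPC if the range does not fall in a free hole */
            return drm_mm_reserve_node(mm, node);
    }
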
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 7def3d58da18..fc5040ae5f25 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
544 * 544 *
545 * This function is to create the modeline based on the GTF algorithm. 545 * This function is to create the modeline based on the GTF algorithm.
546 * Generalized Timing Formula is derived from: 546 * Generalized Timing Formula is derived from:
547 *
547 * GTF Spreadsheet by Andy Morrish (1/5/97) 548 * GTF Spreadsheet by Andy Morrish (1/5/97)
548 * available at http://www.vesa.org 549 * available at http://www.vesa.org
549 * 550 *
@@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
552 * I also refer to the function of fb_get_mode in the file of 553 * I also refer to the function of fb_get_mode in the file of
553 * drivers/video/fbmon.c 554 * drivers/video/fbmon.c
554 * 555 *
555 * Standard GTF parameters: 556 * Standard GTF parameters::
557 *
556 * M = 600 558 * M = 600
557 * C = 40 559 * C = 40
558 * K = 128 560 * K = 128
@@ -1518,6 +1520,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
1518 if (out->status != MODE_OK) 1520 if (out->status != MODE_OK)
1519 goto out; 1521 goto out;
1520 1522
1523 drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
1524
1521 ret = 0; 1525 ret = 0;
1522 1526
1523out: 1527out:
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index e3a4adf03e7b..61146f5b4f56 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -30,14 +30,14 @@
30 * 30 *
31 * As KMS moves toward more fine grained locking, and atomic ioctl where 31 * As KMS moves toward more fine grained locking, and atomic ioctl where
32 * userspace can indirectly control locking order, it becomes necessary 32 * userspace can indirectly control locking order, it becomes necessary
33 * to use ww_mutex and acquire-contexts to avoid deadlocks. But because 33 * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
34 * the locking is more distributed around the driver code, we want a bit 34 * the locking is more distributed around the driver code, we want a bit
35 * of extra utility/tracking out of our acquire-ctx. This is provided 35 * of extra utility/tracking out of our acquire-ctx. This is provided
36 * by drm_modeset_lock / drm_modeset_acquire_ctx. 36 * by drm_modeset_lock / drm_modeset_acquire_ctx.
37 * 37 *
38 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt 38 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
39 * 39 *
40 * The basic usage pattern is to: 40 * The basic usage pattern is to::
41 * 41 *
42 * drm_modeset_acquire_init(&ctx) 42 * drm_modeset_acquire_init(&ctx)
43 * retry: 43 * retry:
@@ -51,6 +51,13 @@
51 * ... do stuff ... 51 * ... do stuff ...
52 * drm_modeset_drop_locks(&ctx); 52 * drm_modeset_drop_locks(&ctx);
53 * drm_modeset_acquire_fini(&ctx); 53 * drm_modeset_acquire_fini(&ctx);
54 *
55 * On top of these per-object locks using &ww_mutex there's also an overall
56 * dev->mode_config.lock, for protecting everything else. Mostly this means
57 * probe state of connectors, and preventing hotplug add/removal of connectors.
58 *
59 * Finally there's a bunch of dedicated locks to protect drm core internal
60 * lists and lookup data structures.
54 */ 61 */
55 62
56/** 63/**
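
Spelled out as compilable C, the retry pattern from the comment above looks roughly like this (a sketch; foo_with_crtc_locked is hypothetical):

    #include <drm/drmP.h>
    #include <drm/drm_modeset_lock.h>

    static int foo_with_crtc_locked(struct drm_crtc *crtc)
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);
    retry:
            ret = drm_modeset_lock(&crtc->mutex, &ctx);
            if (ret == -EDEADLK) {
                    /* lost a lock-order race: unwind and replay */
                    drm_modeset_backoff(&ctx);
                    goto retry;
            }
            if (!ret) {
                    /* ... do stuff under crtc->mutex ... */
            }

            drm_modeset_drop_locks(&ctx);
            drm_modeset_acquire_fini(&ctx);
            return ret;
    }
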
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 369d2898ff9e..fc51306fe365 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -219,10 +219,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update);
219 * 219 *
220 * Note that we make some assumptions about hardware limitations that may not be 220 * Note that we make some assumptions about hardware limitations that may not be
221 * true for all hardware -- 221 * true for all hardware --
222 * 1) Primary plane cannot be repositioned. 222 *
223 * 2) Primary plane cannot be scaled. 223 * 1. Primary plane cannot be repositioned.
224 * 3) Primary plane must cover the entire CRTC. 224 * 2. Primary plane cannot be scaled.
225 * 4) Subpixel positioning is not supported. 225 * 3. Primary plane must cover the entire CRTC.
226 * 4. Subpixel positioning is not supported.
227 *
226 * Drivers for hardware that don't have these restrictions can provide their 228 * Drivers for hardware that don't have these restrictions can provide their
227 * own implementation rather than using this helper. 229 * own implementation rather than using this helper.
228 * 230 *
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index aab0f3f1f42d..780589b420a4 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -593,7 +593,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
593 get_dma_buf(dma_buf); 593 get_dma_buf(dma_buf);
594 } 594 }
595 595
596 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ 596 /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
597 ret = drm_gem_handle_create_tail(file_priv, obj, handle); 597 ret = drm_gem_handle_create_tail(file_priv, obj, handle);
598 drm_gem_object_unreference_unlocked(obj); 598 drm_gem_object_unreference_unlocked(obj);
599 if (ret) 599 if (ret)
@@ -601,11 +601,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
601 601
602 ret = drm_prime_add_buf_handle(&file_priv->prime, 602 ret = drm_prime_add_buf_handle(&file_priv->prime,
603 dma_buf, *handle); 603 dma_buf, *handle);
604 mutex_unlock(&file_priv->prime.lock);
604 if (ret) 605 if (ret)
605 goto fail; 606 goto fail;
606 607
607 mutex_unlock(&file_priv->prime.lock);
608
609 dma_buf_put(dma_buf); 608 dma_buf_put(dma_buf);
610 609
611 return 0; 610 return 0;
@@ -615,11 +614,14 @@ fail:
615 * to detach.. which seems ok.. 614 * to detach.. which seems ok..
616 */ 615 */
617 drm_gem_handle_delete(file_priv, *handle); 616 drm_gem_handle_delete(file_priv, *handle);
617 dma_buf_put(dma_buf);
618 return ret;
619
618out_unlock: 620out_unlock:
619 mutex_unlock(&dev->object_name_lock); 621 mutex_unlock(&dev->object_name_lock);
620out_put: 622out_put:
621 dma_buf_put(dma_buf);
622 mutex_unlock(&file_priv->prime.lock); 623 mutex_unlock(&file_priv->prime.lock);
624 dma_buf_put(dma_buf);
623 return ret; 625 return ret;
624} 626}
625EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); 627EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 0329080d7f7c..a0df377d7d1c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
82 82
83static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) 83static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
84{ 84{
85 struct drm_cmdline_mode *cmdline_mode;
85 struct drm_display_mode *mode; 86 struct drm_display_mode *mode;
86 87
87 if (!connector->cmdline_mode.specified) 88 cmdline_mode = &connector->cmdline_mode;
89 if (!cmdline_mode->specified)
88 return 0; 90 return 0;
89 91
92 /* Only add a GTF mode if we find no matching probed modes */
93 list_for_each_entry(mode, &connector->probed_modes, head) {
94 if (mode->hdisplay != cmdline_mode->xres ||
95 mode->vdisplay != cmdline_mode->yres)
96 continue;
97
98 if (cmdline_mode->refresh_specified) {
99 /* The probed mode's vrefresh is set until later */
100 if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
101 continue;
102 }
103
104 return 0;
105 }
106
90 mode = drm_mode_create_from_cmdline_mode(connector->dev, 107 mode = drm_mode_create_from_cmdline_mode(connector->dev,
91 &connector->cmdline_mode); 108 cmdline_mode);
92 if (mode == NULL) 109 if (mode == NULL)
93 return 0; 110 return 0;
94 111
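
The cmdline mode being de-duplicated here originates from a video= kernel parameter, for instance (connector name assumed):

    video=HDMI-A-1:1024x768@60

If probing already produced a 1024x768 mode (at 60 Hz, since a refresh was given), the helper now returns early instead of also adding a synthetic GTF mode for it.
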
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
new file mode 100644
index 000000000000..b2071d495ada
--- /dev/null
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -0,0 +1,205 @@
1/*
2 * Copyright (C) 2016 Noralf Trønnes
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <drm/drmP.h>
11#include <drm/drm_atomic.h>
12#include <drm/drm_atomic_helper.h>
13#include <drm/drm_crtc_helper.h>
14#include <drm/drm_plane_helper.h>
15#include <drm/drm_simple_kms_helper.h>
16#include <linux/slab.h>
17
18/**
19 * DOC: overview
20 *
21 * This library provides helpers for drivers of simple display
22 * hardware.
23 *
24 * drm_simple_display_pipe_init() initializes a simple display pipeline
25 * which has only one full-screen scanout buffer feeding one output. The
26 * pipeline is represented by struct &drm_simple_display_pipe and binds
27 * together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed
28 * entity. Some flexibility for code reuse is provided through a separately
29 * allocated &drm_connector object and supporting optional &drm_bridge
30 * encoder drivers.
31 */
32
33static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
34 .destroy = drm_encoder_cleanup,
35};
36
37static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
38{
39 struct drm_simple_display_pipe *pipe;
40
41 pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
42 if (!pipe->funcs || !pipe->funcs->enable)
43 return;
44
45 pipe->funcs->enable(pipe, crtc->state);
46}
47
48static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
49{
50 struct drm_simple_display_pipe *pipe;
51
52 pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
53 if (!pipe->funcs || !pipe->funcs->disable)
54 return;
55
56 pipe->funcs->disable(pipe);
57}
58
59static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
60 .disable = drm_simple_kms_crtc_disable,
61 .enable = drm_simple_kms_crtc_enable,
62};
63
64static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
65 .reset = drm_atomic_helper_crtc_reset,
66 .destroy = drm_crtc_cleanup,
67 .set_config = drm_atomic_helper_set_config,
68 .page_flip = drm_atomic_helper_page_flip,
69 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
70 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
71};
72
73static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
74 struct drm_plane_state *plane_state)
75{
76 struct drm_rect src = {
77 .x1 = plane_state->src_x,
78 .y1 = plane_state->src_y,
79 .x2 = plane_state->src_x + plane_state->src_w,
80 .y2 = plane_state->src_y + plane_state->src_h,
81 };
82 struct drm_rect dest = {
83 .x1 = plane_state->crtc_x,
84 .y1 = plane_state->crtc_y,
85 .x2 = plane_state->crtc_x + plane_state->crtc_w,
86 .y2 = plane_state->crtc_y + plane_state->crtc_h,
87 };
88 struct drm_rect clip = { 0 };
89 struct drm_simple_display_pipe *pipe;
90 struct drm_crtc_state *crtc_state;
91 bool visible;
92 int ret;
93
94 pipe = container_of(plane, struct drm_simple_display_pipe, plane);
95 crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
96 &pipe->crtc);
97 if (crtc_state->enable != !!plane_state->crtc)
98 return -EINVAL; /* plane must match crtc enable state */
99
100 if (!crtc_state->enable)
101 return 0; /* nothing to check when disabling or disabled */
102
103 clip.x2 = crtc_state->adjusted_mode.hdisplay;
104 clip.y2 = crtc_state->adjusted_mode.vdisplay;
105 ret = drm_plane_helper_check_update(plane, &pipe->crtc,
106 plane_state->fb,
107 &src, &dest, &clip,
108 DRM_PLANE_HELPER_NO_SCALING,
109 DRM_PLANE_HELPER_NO_SCALING,
110 false, true, &visible);
111 if (ret)
112 return ret;
113
114 if (!visible)
115 return -EINVAL;
116
117 if (!pipe->funcs || !pipe->funcs->check)
118 return 0;
119
120 return pipe->funcs->check(pipe, plane_state, crtc_state);
121}
122
123static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
124 struct drm_plane_state *pstate)
125{
126 struct drm_simple_display_pipe *pipe;
127
128 pipe = container_of(plane, struct drm_simple_display_pipe, plane);
129 if (!pipe->funcs || !pipe->funcs->update)
130 return;
131
132 pipe->funcs->update(pipe, pstate);
133}
134
135static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
136 .atomic_check = drm_simple_kms_plane_atomic_check,
137 .atomic_update = drm_simple_kms_plane_atomic_update,
138};
139
140static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
141 .update_plane = drm_atomic_helper_update_plane,
142 .disable_plane = drm_atomic_helper_disable_plane,
143 .destroy = drm_plane_cleanup,
144 .reset = drm_atomic_helper_plane_reset,
145 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
146 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
147};
148
149/**
150 * drm_simple_display_pipe_init - Initialize a simple display pipeline
151 * @dev: DRM device
152 * @pipe: simple display pipe object to initialize
153 * @funcs: callbacks for the display pipe (optional)
154 * @formats: array of supported formats (%DRM_FORMAT_*)
155 * @format_count: number of elements in @formats
156 * @connector: connector to attach and register
157 *
158 * Sets up a display pipeline which consists of a really simple
159 * plane-crtc-encoder pipe coupled with the provided connector.
160 * Teardown of a simple display pipe is all handled automatically by the drm
161 * core through calling drm_mode_config_cleanup(). Drivers afterwards need to
162 * release the memory for the structure themselves.
163 *
164 * Returns:
165 * Zero on success, negative error code on failure.
166 */
167int drm_simple_display_pipe_init(struct drm_device *dev,
168 struct drm_simple_display_pipe *pipe,
169 const struct drm_simple_display_pipe_funcs *funcs,
170 const uint32_t *formats, unsigned int format_count,
171 struct drm_connector *connector)
172{
173 struct drm_encoder *encoder = &pipe->encoder;
174 struct drm_plane *plane = &pipe->plane;
175 struct drm_crtc *crtc = &pipe->crtc;
176 int ret;
177
178 pipe->connector = connector;
179 pipe->funcs = funcs;
180
181 drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs);
182 ret = drm_universal_plane_init(dev, plane, 0,
183 &drm_simple_kms_plane_funcs,
184 formats, format_count,
185 DRM_PLANE_TYPE_PRIMARY, NULL);
186 if (ret)
187 return ret;
188
189 drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs);
190 ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
191 &drm_simple_kms_crtc_funcs, NULL);
192 if (ret)
193 return ret;
194
195 encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
196 ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
197 DRM_MODE_ENCODER_NONE, NULL);
198 if (ret)
199 return ret;
200
201 return drm_mode_connector_attach_encoder(connector, encoder);
202}
203EXPORT_SYMBOL(drm_simple_display_pipe_init);
204
205MODULE_LICENSE("GPL");
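
A hedged sketch of a driver binding the new helper to its (separately initialized) connector; all foo_* names are hypothetical:

    #include <linux/kernel.h>
    #include <drm/drm_fourcc.h>
    #include <drm/drm_simple_kms_helper.h>

    struct foo_device {
            struct drm_simple_display_pipe pipe;
            struct drm_connector connector;
    };

    static void foo_pipe_enable(struct drm_simple_display_pipe *pipe,
                                struct drm_crtc_state *crtc_state)
    {
            /* power up the panel and start scanning out pipe->plane.state->fb */
    }

    static void foo_pipe_update(struct drm_simple_display_pipe *pipe,
                                struct drm_plane_state *old_state)
    {
            /* the new framebuffer, if any, is in pipe->plane.state->fb */
    }

    static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
            .enable = foo_pipe_enable,
            .update = foo_pipe_update,
    };

    static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

    static int foo_display_init(struct drm_device *drm, struct foo_device *foo)
    {
            /* called after drm_mode_config_init() and connector setup */
            return drm_simple_display_pipe_init(drm, &foo->pipe, &foo_pipe_funcs,
                                                foo_formats,
                                                ARRAY_SIZE(foo_formats),
                                                &foo->connector);
    }
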
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index fa7fadce8063..32dd821b7202 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -32,75 +32,6 @@ static struct device_type drm_sysfs_device_minor = {
32 32
33struct class *drm_class; 33struct class *drm_class;
34 34
35/**
36 * __drm_class_suspend - internal DRM class suspend routine
37 * @dev: Linux device to suspend
38 * @state: power state to enter
39 *
40 * Just figures out what the actual struct drm_device associated with
41 * @dev is and calls its suspend hook, if present.
42 */
43static int __drm_class_suspend(struct device *dev, pm_message_t state)
44{
45 if (dev->type == &drm_sysfs_device_minor) {
46 struct drm_minor *drm_minor = to_drm_minor(dev);
47 struct drm_device *drm_dev = drm_minor->dev;
48
49 if (drm_minor->type == DRM_MINOR_LEGACY &&
50 !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
51 drm_dev->driver->suspend)
52 return drm_dev->driver->suspend(drm_dev, state);
53 }
54 return 0;
55}
56
57/**
58 * drm_class_suspend - internal DRM class suspend hook. Simply calls
59 * __drm_class_suspend() with the correct pm state.
60 * @dev: Linux device to suspend
61 */
62static int drm_class_suspend(struct device *dev)
63{
64 return __drm_class_suspend(dev, PMSG_SUSPEND);
65}
66
67/**
68 * drm_class_freeze - internal DRM class freeze hook. Simply calls
69 * __drm_class_suspend() with the correct pm state.
70 * @dev: Linux device to freeze
71 */
72static int drm_class_freeze(struct device *dev)
73{
74 return __drm_class_suspend(dev, PMSG_FREEZE);
75}
76
77/**
78 * drm_class_resume - DRM class resume hook
79 * @dev: Linux device to resume
80 *
81 * Just figures out what the actual struct drm_device associated with
82 * @dev is and calls its resume hook, if present.
83 */
84static int drm_class_resume(struct device *dev)
85{
86 if (dev->type == &drm_sysfs_device_minor) {
87 struct drm_minor *drm_minor = to_drm_minor(dev);
88 struct drm_device *drm_dev = drm_minor->dev;
89
90 if (drm_minor->type == DRM_MINOR_LEGACY &&
91 !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
92 drm_dev->driver->resume)
93 return drm_dev->driver->resume(drm_dev);
94 }
95 return 0;
96}
97
98static const struct dev_pm_ops drm_class_dev_pm_ops = {
99 .suspend = drm_class_suspend,
100 .resume = drm_class_resume,
101 .freeze = drm_class_freeze,
102};
103
104static char *drm_devnode(struct device *dev, umode_t *mode) 35static char *drm_devnode(struct device *dev, umode_t *mode)
105{ 36{
106 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); 37 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -131,8 +62,6 @@ int drm_sysfs_init(void)
131 if (IS_ERR(drm_class)) 62 if (IS_ERR(drm_class))
132 return PTR_ERR(drm_class); 63 return PTR_ERR(drm_class);
133 64
134 drm_class->pm = &drm_class_dev_pm_ops;
135
136 err = class_create_file(drm_class, &class_attr_version.attr); 65 err = class_create_file(drm_class, &class_attr_version.attr);
137 if (err) { 66 if (err) {
138 class_destroy(drm_class); 67 class_destroy(drm_class);
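
[Editor's note: the dropped hooks only ever serviced legacy (UMS) drivers, so modeset drivers are expected to hang power management off the bus device they are bound to rather than the DRM class. A minimal sketch of that pattern for a hypothetical platform-device driver; all my_* names are assumptions and hardware-specific work is elided.]

	static int my_drm_suspend(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);

		drm_kms_helper_poll_disable(drm);	/* stop output polling */
		/* driver-specific hardware quiesce goes here */
		return 0;
	}

	static int my_drm_resume(struct device *dev)
	{
		struct drm_device *drm = dev_get_drvdata(dev);

		/* driver-specific hardware restore goes here */
		drm_kms_helper_poll_enable(drm);
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(my_drm_pm_ops, my_drm_suspend, my_drm_resume);

	static struct platform_driver my_drm_platform_driver = {
		.driver = {
			.name = "my-drm",
			.pm   = &my_drm_pm_ops,
		},
	};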
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 2f2ecde8285b..f306c8855978 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
127 * used to implement weakly referenced lookups using kref_get_unless_zero(). 127 * used to implement weakly referenced lookups using kref_get_unless_zero().
128 * 128 *
129 * Example: 129 * Example:
130 *
131 * ::
132 *
130 * drm_vma_offset_lock_lookup(mgr); 133 * drm_vma_offset_lock_lookup(mgr);
131 * node = drm_vma_offset_lookup_locked(mgr); 134 * node = drm_vma_offset_lookup_locked(mgr);
132 * if (node) 135 * if (node)
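
[Editor's note: the kerneldoc example being turned into a reST literal block here describes the weakly referenced lookup pattern. Spelled out, it looks roughly like the sketch below, where struct my_obj and its refcount/vma_node members are assumed purely for illustration.]

	struct my_obj {
		struct kref refcount;
		struct drm_vma_offset_node vma_node;
	};

	static struct my_obj *my_obj_lookup(struct drm_vma_offset_manager *mgr,
					    unsigned long start,
					    unsigned long pages)
	{
		struct drm_vma_offset_node *node;
		struct my_obj *obj = NULL;

		drm_vma_offset_lock_lookup(mgr);
		node = drm_vma_offset_lookup_locked(mgr, start, pages);
		if (node) {
			obj = container_of(node, struct my_obj, vma_node);
			/* object may be mid-destruction: ref only if alive */
			if (!kref_get_unless_zero(&obj->refcount))
				obj = NULL;
		}
		drm_vma_offset_unlock_lookup(mgr);

		return obj;
	}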
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f5321e2f25ff..a69cdd526bf8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -125,7 +125,7 @@ struct etnaviv_gpu {
125 u32 completed_fence; 125 u32 completed_fence;
126 u32 retired_fence; 126 u32 retired_fence;
127 wait_queue_head_t fence_event; 127 wait_queue_head_t fence_event;
128 unsigned int fence_context; 128 u64 fence_context;
129 spinlock_t fence_spinlock; 129 spinlock_t fence_spinlock;
130 130
131 /* worker for handling active-list retiring: */ 131 /* worker for handling active-list retiring: */
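
[Editor's note: this tracks the core dma-buf change in this series that widened fence contexts to 64 bit, so fence_context_alloc() now hands out u64 values and drivers mirror the type to avoid truncation. A sketch of the allocation side; the f, gpu and my_fence_ops names are assumptions.]

	u64 context = fence_context_alloc(1);	/* core allocator, now returns u64 */

	fence_init(&f->base, &my_fence_ops, &gpu->fence_spinlock,
		   context, ++gpu->next_fence);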
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 5e38e749ac17..ad6b73c7fc59 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -93,17 +93,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
93 return 0; 93 return 0;
94} 94}
95 95
96static struct drm_encoder *
97exynos_dpi_best_encoder(struct drm_connector *connector)
98{
99 struct exynos_dpi *ctx = connector_to_dpi(connector);
100
101 return &ctx->encoder;
102}
103
104static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { 96static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
105 .get_modes = exynos_dpi_get_modes, 97 .get_modes = exynos_dpi_get_modes,
106 .best_encoder = exynos_dpi_best_encoder,
107}; 98};
108 99
109static int exynos_dpi_create_connector(struct drm_encoder *encoder) 100static int exynos_dpi_create_connector(struct drm_encoder *encoder)
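
[Editor's note: the same boilerplate removal repeats below for the Exynos DSI, VIDI and HDMI connectors and for fsl-dcu. With .best_encoder left NULL, the atomic helpers fall back to a default that picks the connector's single attached encoder; roughly, the fallback behaves like this sketch.]

	/* approximate behaviour of the core fallback for atomic drivers;
	 * only valid when exactly one encoder is attached */
	static struct drm_encoder *
	fallback_best_encoder(struct drm_connector *connector)
	{
		return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
	}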
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2dd820e23b0c..4a679fb9bb02 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -267,6 +267,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
267{ 267{
268 struct exynos_drm_private *priv = dev->dev_private; 268 struct exynos_drm_private *priv = dev->dev_private;
269 struct exynos_atomic_commit *commit; 269 struct exynos_atomic_commit *commit;
270 struct drm_crtc *crtc;
271 struct drm_crtc_state *crtc_state;
270 int i, ret; 272 int i, ret;
271 273
272 commit = kzalloc(sizeof(*commit), GFP_KERNEL); 274 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
@@ -288,10 +290,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
288 /* Wait until all affected CRTCs have completed previous commits and 290 /* Wait until all affected CRTCs have completed previous commits and
289 * mark them as pending. 291 * mark them as pending.
290 */ 292 */
291 for (i = 0; i < dev->mode_config.num_crtc; ++i) { 293 for_each_crtc_in_state(state, crtc, crtc_state, i)
292 if (state->crtcs[i]) 294 commit->crtcs |= drm_crtc_mask(crtc);
293 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
294 }
295 295
296 wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs)); 296 wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
297 297
@@ -299,7 +299,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
299 priv->pending |= commit->crtcs; 299 priv->pending |= commit->crtcs;
300 spin_unlock(&priv->lock); 300 spin_unlock(&priv->lock);
301 301
302 drm_atomic_helper_swap_state(dev, state); 302 drm_atomic_helper_swap_state(state, true);
303 303
304 if (nonblock) 304 if (nonblock)
305 schedule_work(&commit->work); 305 schedule_work(&commit->work);
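
[Editor's note: two things change in this hunk. The commit now walks only the CRTCs actually carried by the drm_atomic_state instead of every CRTC in the device, and drm_atomic_helper_swap_state() takes the state first plus a stall flag. The mask-building idiom, in isolation:]

	/* collect a bitmask of just the CRTCs this commit touches */
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	u32 crtcs = 0;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i)
		crtcs |= drm_crtc_mask(crtc);	/* == 1 << drm_crtc_index(crtc) */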
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 601ecf8006a7..e07cb1fe4860 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1566,17 +1566,8 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
1566 return 0; 1566 return 0;
1567} 1567}
1568 1568
1569static struct drm_encoder *
1570exynos_dsi_best_encoder(struct drm_connector *connector)
1571{
1572 struct exynos_dsi *dsi = connector_to_dsi(connector);
1573
1574 return &dsi->encoder;
1575}
1576
1577static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { 1569static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
1578 .get_modes = exynos_dsi_get_modes, 1570 .get_modes = exynos_dsi_get_modes,
1579 .best_encoder = exynos_dsi_best_encoder,
1580}; 1571};
1581 1572
1582static int exynos_dsi_create_connector(struct drm_encoder *encoder) 1573static int exynos_dsi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 55f1d37c666a..77f12c00abf9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
242 state->v_ratio == (1 << 15)) 242 state->v_ratio == (1 << 15))
243 height_ok = true; 243 height_ok = true;
244 244
245 if (width_ok & height_ok) 245 if (width_ok && height_ok)
246 return 0; 246 return 0;
247 247
248 DRM_DEBUG_KMS("scaling mode is not supported"); 248 DRM_DEBUG_KMS("scaling mode is not supported");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 608b0afa337f..e8f6c92b2a36 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -378,16 +378,8 @@ static int vidi_get_modes(struct drm_connector *connector)
378 return drm_add_edid_modes(connector, edid); 378 return drm_add_edid_modes(connector, edid);
379} 379}
380 380
381static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
382{
383 struct vidi_context *ctx = ctx_from_connector(connector);
384
385 return &ctx->encoder;
386}
387
388static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { 381static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
389 .get_modes = vidi_get_modes, 382 .get_modes = vidi_get_modes,
390 .best_encoder = vidi_best_encoder,
391}; 383};
392 384
393static int vidi_create_connector(struct drm_encoder *encoder) 385static int vidi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 58de5a430508..1625d7c8a319 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -937,17 +937,9 @@ static int hdmi_mode_valid(struct drm_connector *connector,
937 return MODE_OK; 937 return MODE_OK;
938} 938}
939 939
940static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
941{
942 struct hdmi_context *hdata = connector_to_hdmi(connector);
943
944 return &hdata->encoder;
945}
946
947static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { 940static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
948 .get_modes = hdmi_get_modes, 941 .get_modes = hdmi_get_modes,
949 .mode_valid = hdmi_mode_valid, 942 .mode_valid = hdmi_mode_valid,
950 .best_encoder = hdmi_best_encoder,
951}; 943};
952 944
953static int hdmi_create_connector(struct drm_encoder *encoder) 945static int hdmi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 89c0084c2814..706de3278f1c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -22,20 +22,21 @@
22#include "fsl_dcu_drm_drv.h" 22#include "fsl_dcu_drm_drv.h"
23#include "fsl_dcu_drm_plane.h" 23#include "fsl_dcu_drm_plane.h"
24 24
25static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc, 25static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
26 struct drm_crtc_state *old_crtc_state) 26 struct drm_crtc_state *old_crtc_state)
27{ 27{
28} 28 struct drm_pending_vblank_event *event = crtc->state->event;
29 29
30static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc, 30 if (event) {
31 struct drm_crtc_state *state) 31 crtc->state->event = NULL;
32{
33 return 0;
34}
35 32
36static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, 33 spin_lock_irq(&crtc->dev->event_lock);
37 struct drm_crtc_state *old_crtc_state) 34 if (drm_crtc_vblank_get(crtc) == 0)
38{ 35 drm_crtc_arm_vblank_event(crtc, event);
36 else
37 drm_crtc_send_vblank_event(crtc, event);
38 spin_unlock_irq(&crtc->dev->event_lock);
39 }
39} 40}
40 41
41static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) 42static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
@@ -117,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
117} 118}
118 119
119static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { 120static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
120 .atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
121 .atomic_check = fsl_dcu_drm_crtc_atomic_check,
122 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, 121 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
123 .disable = fsl_dcu_drm_disable_crtc, 122 .disable = fsl_dcu_drm_disable_crtc,
124 .enable = fsl_dcu_drm_crtc_enable, 123 .enable = fsl_dcu_drm_crtc_enable,
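
[Editor's note: the empty atomic_begin/atomic_check stubs can simply go because the helpers treat NULL hooks as no-ops; the substantive change is that atomic_flush now completes the pending vblank event itself (the kirin ADE driver below gains the identical code). Annotated, the event hand-off works like this:]

	spin_lock_irq(&crtc->dev->event_lock);
	if (drm_crtc_vblank_get(crtc) == 0)
		/* vblank irq available: deliver at the next frame boundary */
		drm_crtc_arm_vblank_event(crtc, event);
	else
		/* vblank off (e.g. crtc disabled): deliver immediately */
		drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irq(&crtc->dev->event_lock);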
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0ec1ad961e0d..33727d5d826a 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -198,7 +198,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
198 .get_vblank_counter = drm_vblank_no_hw_counter, 198 .get_vblank_counter = drm_vblank_no_hw_counter,
199 .enable_vblank = fsl_dcu_drm_enable_vblank, 199 .enable_vblank = fsl_dcu_drm_enable_vblank,
200 .disable_vblank = fsl_dcu_drm_disable_vblank, 200 .disable_vblank = fsl_dcu_drm_disable_vblank,
201 .gem_free_object = drm_gem_cma_free_object, 201 .gem_free_object_unlocked = drm_gem_cma_free_object,
202 .gem_vm_ops = &drm_gem_cma_vm_ops, 202 .gem_vm_ops = &drm_gem_cma_vm_ops,
203 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 203 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
204 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 204 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 98c998da91eb..0b0989e503ea 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -102,14 +102,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
102 .reset = drm_atomic_helper_connector_reset, 102 .reset = drm_atomic_helper_connector_reset,
103}; 103};
104 104
105static struct drm_encoder *
106fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
107{
108 struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
109
110 return fsl_con->encoder;
111}
112
113static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) 105static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
114{ 106{
115 struct fsl_dcu_drm_connector *fsl_connector; 107 struct fsl_dcu_drm_connector *fsl_connector;
@@ -136,7 +128,6 @@ static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
136} 128}
137 129
138static const struct drm_connector_helper_funcs connector_helper_funcs = { 130static const struct drm_connector_helper_funcs connector_helper_funcs = {
139 .best_encoder = fsl_dcu_drm_connector_best_encoder,
140 .get_modes = fsl_dcu_drm_connector_get_modes, 131 .get_modes = fsl_dcu_drm_connector_get_modes,
141 .mode_valid = fsl_dcu_drm_connector_mode_valid, 132 .mode_valid = fsl_dcu_drm_connector_mode_valid,
142}; 133};
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index c95406e6f44d..1a1cf7a3b5ef 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
175 } 175 }
176} 176}
177 177
178void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, 178int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
179 u32 start, u32 size) 179 u32 size)
180{ 180{
181 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 181 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
182 int i; 182 int i;
183 int end = (start + size > 256) ? 256 : start + size;
184 183
185 for (i = start; i < end; i++) { 184 for (i = 0; i < size; i++) {
186 gma_crtc->lut_r[i] = red[i] >> 8; 185 gma_crtc->lut_r[i] = red[i] >> 8;
187 gma_crtc->lut_g[i] = green[i] >> 8; 186 gma_crtc->lut_g[i] = green[i] >> 8;
188 gma_crtc->lut_b[i] = blue[i] >> 8; 187 gma_crtc->lut_b[i] = blue[i] >> 8;
189 } 188 }
190 189
191 gma_crtc_load_lut(crtc); 190 gma_crtc_load_lut(crtc);
191
192 return 0;
192} 193}
193 194
194/** 195/**
@@ -281,7 +282,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
281 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 282 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
282 283
283 /* Turn off vblank interrupts */ 284 /* Turn off vblank interrupts */
284 drm_vblank_off(dev, pipe); 285 drm_crtc_vblank_off(crtc);
285 286
286 /* Wait for vblank for the disable to take effect */ 287 /* Wait for vblank for the disable to take effect */
287 gma_wait_for_vblank(dev); 288 gma_wait_for_vblank(dev);
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index b2491c65f053..e72dd08b701b 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
72 uint32_t width, uint32_t height); 72 uint32_t width, uint32_t height);
73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
74extern void gma_crtc_load_lut(struct drm_crtc *crtc); 74extern void gma_crtc_load_lut(struct drm_crtc *crtc);
75extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 75extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
76 u16 *blue, u32 start, u32 size); 76 u16 *blue, u32 size);
77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode); 77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
78extern void gma_crtc_prepare(struct drm_crtc *crtc); 78extern void gma_crtc_prepare(struct drm_crtc *crtc);
79extern void gma_crtc_commit(struct drm_crtc *crtc); 79extern void gma_crtc_commit(struct drm_crtc *crtc);
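
[Editor's note: the prototype change tracks the core's new .gamma_set contract: the hook always receives a full LUT of crtc->gamma_size entries, with no start offset or partial updates, and reports errors through an int return. A sketch of a conforming hook; my_hw_write_lut() is a hypothetical register writer.]

	static int my_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				u16 *blue, u32 size)
	{
		u32 i;

		for (i = 0; i < size; i++)	/* size == crtc->gamma_size */
			my_hw_write_lut(crtc, i, red[i] >> 8, green[i] >> 8,
					blue[i] >> 8);

		return 0;
	}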
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 398015be87e4..7b6c84925098 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
491 struct drm_psb_private *dev_priv = dev->dev_private; 491 struct drm_psb_private *dev_priv = dev->dev_private;
492 struct gma_crtc *gma_crtc; 492 struct gma_crtc *gma_crtc;
493 int i; 493 int i;
494 uint16_t *r_base, *g_base, *b_base;
495 494
496 /* We allocate an extra array of drm_connector pointers 495 /* We allocate an extra array of drm_connector pointers
497 * for fbdev after the crtc */ 496 * for fbdev after the crtc */
@@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
519 gma_crtc->pipe = pipe; 518 gma_crtc->pipe = pipe;
520 gma_crtc->plane = pipe; 519 gma_crtc->plane = pipe;
521 520
522 r_base = gma_crtc->base.gamma_store;
523 g_base = r_base + 256;
524 b_base = g_base + 256;
525 for (i = 0; i < 256; i++) { 521 for (i = 0; i < 256; i++) {
526 gma_crtc->lut_r[i] = i; 522 gma_crtc->lut_r[i] = i;
527 gma_crtc->lut_g[i] = i; 523 gma_crtc->lut_g[i] = i;
528 gma_crtc->lut_b[i] = i; 524 gma_crtc->lut_b[i] = i;
529 r_base[i] = i << 8;
530 g_base[i] = i << 8;
531 b_base[i] = i << 8;
532 525
533 gma_crtc->lut_adj[i] = 0; 526 gma_crtc->lut_adj[i] = 0;
534 } 527 }
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index fba6372d060e..ed76baad525f 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -502,13 +502,6 @@ static void ade_crtc_disable(struct drm_crtc *crtc)
502 acrtc->enable = false; 502 acrtc->enable = false;
503} 503}
504 504
505static int ade_crtc_atomic_check(struct drm_crtc *crtc,
506 struct drm_crtc_state *state)
507{
508 /* do nothing */
509 return 0;
510}
511
512static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc) 505static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
513{ 506{
514 struct ade_crtc *acrtc = to_ade_crtc(crtc); 507 struct ade_crtc *acrtc = to_ade_crtc(crtc);
@@ -537,6 +530,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
537{ 530{
538 struct ade_crtc *acrtc = to_ade_crtc(crtc); 531 struct ade_crtc *acrtc = to_ade_crtc(crtc);
539 struct ade_hw_ctx *ctx = acrtc->ctx; 532 struct ade_hw_ctx *ctx = acrtc->ctx;
533 struct drm_pending_vblank_event *event = crtc->state->event;
540 void __iomem *base = ctx->base; 534 void __iomem *base = ctx->base;
541 535
542 /* regs only take effect while the crtc is enabled */ 536 /* regs only take effect while the crtc is enabled */
@@ -545,12 +539,22 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
545 /* flush ade registers */ 539 /* flush ade registers */
546 writel(ADE_ENABLE, base + ADE_EN); 540 writel(ADE_ENABLE, base + ADE_EN);
547 } 541 }
542
543 if (event) {
544 crtc->state->event = NULL;
545
546 spin_lock_irq(&crtc->dev->event_lock);
547 if (drm_crtc_vblank_get(crtc) == 0)
548 drm_crtc_arm_vblank_event(crtc, event);
549 else
550 drm_crtc_send_vblank_event(crtc, event);
551 spin_unlock_irq(&crtc->dev->event_lock);
552 }
548} 553}
549 554
550static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = { 555static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
551 .enable = ade_crtc_enable, 556 .enable = ade_crtc_enable,
552 .disable = ade_crtc_disable, 557 .disable = ade_crtc_disable,
553 .atomic_check = ade_crtc_atomic_check,
554 .mode_set_nofb = ade_crtc_mode_set_nofb, 558 .mode_set_nofb = ade_crtc_mode_set_nofb,
555 .atomic_begin = ade_crtc_atomic_begin, 559 .atomic_begin = ade_crtc_atomic_begin,
556 .atomic_flush = ade_crtc_atomic_flush, 560 .atomic_flush = ade_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 3f94785fbcca..193657259ee9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -173,7 +173,7 @@ static struct drm_driver kirin_drm_driver = {
173 .fops = &kirin_drm_fops, 173 .fops = &kirin_drm_fops,
174 .set_busid = drm_platform_set_busid, 174 .set_busid = drm_platform_set_busid,
175 175
176 .gem_free_object = drm_gem_cma_free_object, 176 .gem_free_object_unlocked = drm_gem_cma_free_object,
177 .gem_vm_ops = &drm_gem_cma_vm_ops, 177 .gem_vm_ops = &drm_gem_cma_vm_ops,
178 .dumb_create = kirin_gem_cma_dumb_create, 178 .dumb_create = kirin_gem_cma_dumb_create,
179 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 179 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
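
[Editor's note: as with fsl-dcu above, drivers whose GEM free path does not depend on struct_mutex (the CMA helpers qualify) now register the _unlocked variant, letting the core skip the lock on the free path. The registration, in isolation; my_driver is a placeholder name.]

	static struct drm_driver my_driver = {
		/* CMA objects can be freed without holding struct_mutex */
		.gem_free_object_unlocked = drm_gem_cma_free_object,
		.gem_vm_ops               = &drm_gem_cma_vm_ops,
	};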
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0b88ba0f3c1f..7e2944406b8f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += intel_audio.o \
59 intel_bios.o \ 59 intel_bios.o \
60 intel_color.o \ 60 intel_color.o \
61 intel_display.o \ 61 intel_display.o \
62 intel_dpio_phy.o \
62 intel_dpll_mgr.o \ 63 intel_dpll_mgr.o \
63 intel_fbc.o \ 64 intel_fbc.o \
64 intel_fifo_underrun.o \ 65 intel_fifo_underrun.o \
@@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \
81 dvo_tfp410.o \ 82 dvo_tfp410.o \
82 intel_crt.o \ 83 intel_crt.o \
83 intel_ddi.o \ 84 intel_ddi.o \
85 intel_dp_aux_backlight.o \
84 intel_dp_link_training.o \ 86 intel_dp_link_training.o \
85 intel_dp_mst.o \ 87 intel_dp_mst.o \
86 intel_dp.o \ 88 intel_dp.o \
87 intel_dsi.o \ 89 intel_dsi.o \
90 intel_dsi_dcs_backlight.o \
88 intel_dsi_panel_vbt.o \ 91 intel_dsi_panel_vbt.o \
89 intel_dsi_pll.o \ 92 intel_dsi_pll.o \
90 intel_dvo.o \ 93 intel_dvo.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a337f33bec5b..d97f28bfa9db 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
215 CMD( MI_RS_CONTEXT, SMI, F, 1, S ), 215 CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
216 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), 216 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
217 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), 217 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
218 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ), 218 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
219 .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
219 CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ), 220 CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
220 CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ), 221 CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
221 CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ), 222 CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
750 int cmd_table_count; 751 int cmd_table_count;
751 int ret; 752 int ret;
752 753
753 if (!IS_GEN7(engine->dev)) 754 if (!IS_GEN7(engine->i915))
754 return 0; 755 return 0;
755 756
756 switch (engine->id) { 757 switch (engine->id) {
757 case RCS: 758 case RCS:
758 if (IS_HASWELL(engine->dev)) { 759 if (IS_HASWELL(engine->i915)) {
759 cmd_tables = hsw_render_ring_cmds; 760 cmd_tables = hsw_render_ring_cmds;
760 cmd_table_count = 761 cmd_table_count =
761 ARRAY_SIZE(hsw_render_ring_cmds); 762 ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
764 cmd_table_count = ARRAY_SIZE(gen7_render_cmds); 765 cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
765 } 766 }
766 767
767 if (IS_HASWELL(engine->dev)) { 768 if (IS_HASWELL(engine->i915)) {
768 engine->reg_tables = hsw_render_reg_tables; 769 engine->reg_tables = hsw_render_reg_tables;
769 engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); 770 engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
770 } else { 771 } else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
780 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; 781 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
781 break; 782 break;
782 case BCS: 783 case BCS:
783 if (IS_HASWELL(engine->dev)) { 784 if (IS_HASWELL(engine->i915)) {
784 cmd_tables = hsw_blt_ring_cmds; 785 cmd_tables = hsw_blt_ring_cmds;
785 cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); 786 cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
786 } else { 787 } else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
788 cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); 789 cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
789 } 790 }
790 791
791 if (IS_HASWELL(engine->dev)) { 792 if (IS_HASWELL(engine->i915)) {
792 engine->reg_tables = hsw_blt_reg_tables; 793 engine->reg_tables = hsw_blt_reg_tables;
793 engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); 794 engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
794 } else { 795 } else {
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
1035 if (!engine->needs_cmd_parser) 1036 if (!engine->needs_cmd_parser)
1036 return false; 1037 return false;
1037 1038
1038 if (!USES_PPGTT(engine->dev)) 1039 if (!USES_PPGTT(engine->i915))
1039 return false; 1040 return false;
1040 1041
1041 return (i915.enable_cmd_parser == 1); 1042 return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1098 return false; 1099 return false;
1099 } 1100 }
1100 1101
1102 if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
1103 DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
1104 return false;
1105 }
1106
1101 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) 1107 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
1102 *oacontrol_set = (cmd[offset + 1] != 0); 1108 *oacontrol_set = (cmd[offset + 1] != 0);
1103 } 1109 }
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1113 return false; 1119 return false;
1114 } 1120 }
1115 1121
1122 if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
1123 DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
1124 reg_addr);
1125 return false;
1126 }
1127
1116 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && 1128 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
1117 (offset + 2 > length || 1129 (offset + 2 > length ||
1118 (cmd[offset + 1] & reg->mask) != reg->value)) { 1130 (cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1275,8 +1287,21 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
1275 * 1287 *
1276 * Return: the current version number of the cmd parser 1288 * Return: the current version number of the cmd parser
1277 */ 1289 */
1278int i915_cmd_parser_get_version(void) 1290int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
1279{ 1291{
1292 struct intel_engine_cs *engine;
1293 bool active = false;
1294
1295 /* If the command parser is not enabled, report 0 - unsupported */
1296 for_each_engine(engine, dev_priv) {
1297 if (i915_needs_cmd_parser(engine)) {
1298 active = true;
1299 break;
1300 }
1301 }
1302 if (!active)
1303 return 0;
1304
1280 /* 1305 /*
1281 * Command parser version history 1306 * Command parser version history
1282 * 1307 *
@@ -1288,6 +1313,7 @@ int i915_cmd_parser_get_version(void)
1288 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. 1313 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
1289 * 5. GPGPU dispatch compute indirect registers. 1314 * 5. GPGPU dispatch compute indirect registers.
1290 * 6. TIMESTAMP register and Haswell CS GPR registers 1315 * 6. TIMESTAMP register and Haswell CS GPR registers
1316 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
1291 */ 1317 */
1292 return 6; 1318 return 7;
1293} 1319}
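
[Editor's note: userspace discovers these capabilities through the existing getparam interface. With this change a return of 0 means the parser is not active on any engine, and 7 or higher advertises MI_LOAD_REGISTER_REG between whitelisted registers. A userspace-side sketch using libdrm:]

	#include <xf86drm.h>
	#include <i915_drm.h>

	static int cmd_parser_version(int fd)
	{
		int version = 0;
		drm_i915_getparam_t gp = {
			.param = I915_PARAM_CMD_PARSER_VERSION,
			.value = &version,
		};

		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;	/* old kernel or not i915 */

		return version;		/* 0: parser inactive; >= 7: LRR allowed */
	}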
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 32690332d441..614ac085e51f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
89 return 0; 89 return 0;
90} 90}
91 91
92static const char get_active_flag(struct drm_i915_gem_object *obj) 92static char get_active_flag(struct drm_i915_gem_object *obj)
93{ 93{
94 return obj->active ? '*' : ' '; 94 return obj->active ? '*' : ' ';
95} 95}
96 96
97static const char get_pin_flag(struct drm_i915_gem_object *obj) 97static char get_pin_flag(struct drm_i915_gem_object *obj)
98{ 98{
99 return obj->pin_display ? 'p' : ' '; 99 return obj->pin_display ? 'p' : ' ';
100} 100}
101 101
102static const char get_tiling_flag(struct drm_i915_gem_object *obj) 102static char get_tiling_flag(struct drm_i915_gem_object *obj)
103{ 103{
104 switch (obj->tiling_mode) { 104 switch (obj->tiling_mode) {
105 default: 105 default:
@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
109 } 109 }
110} 110}
111 111
112static inline const char get_global_flag(struct drm_i915_gem_object *obj) 112static char get_global_flag(struct drm_i915_gem_object *obj)
113{ 113{
114 return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; 114 return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
115} 115}
116 116
117static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) 117static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
118{ 118{
119 return obj->mapping ? 'M' : ' '; 119 return obj->mapping ? 'M' : ' ';
120} 120}
@@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
199 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); 199 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
200} 200}
201 201
202static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
203{
204 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
205 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
206 seq_putc(m, ' ');
207}
208
209static int i915_gem_object_list_info(struct seq_file *m, void *data) 202static int i915_gem_object_list_info(struct seq_file *m, void *data)
210{ 203{
211 struct drm_info_node *node = m->private; 204 struct drm_info_node *node = m->private;
@@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m,
424 print_file_stats(m, "[k]batch pool", stats); 417 print_file_stats(m, "[k]batch pool", stats);
425} 418}
426 419
420static int per_file_ctx_stats(int id, void *ptr, void *data)
421{
422 struct i915_gem_context *ctx = ptr;
423 int n;
424
425 for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
426 if (ctx->engine[n].state)
427 per_file_stats(0, ctx->engine[n].state, data);
428 if (ctx->engine[n].ringbuf)
429 per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
430 }
431
432 return 0;
433}
434
435static void print_context_stats(struct seq_file *m,
436 struct drm_i915_private *dev_priv)
437{
438 struct file_stats stats;
439 struct drm_file *file;
440
441 memset(&stats, 0, sizeof(stats));
442
443 mutex_lock(&dev_priv->dev->struct_mutex);
444 if (dev_priv->kernel_context)
445 per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
446
447 list_for_each_entry(file, &dev_priv->dev->filelist, lhead) {
448 struct drm_i915_file_private *fpriv = file->driver_priv;
449 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
450 }
451 mutex_unlock(&dev_priv->dev->struct_mutex);
452
453 print_file_stats(m, "[k]contexts", stats);
454}
455
427#define count_vmas(list, member) do { \ 456#define count_vmas(list, member) do { \
428 list_for_each_entry(vma, list, member) { \ 457 list_for_each_entry(vma, list, member) { \
429 size += i915_gem_obj_total_ggtt_size(vma->obj); \ 458 size += i915_gem_obj_total_ggtt_size(vma->obj); \
@@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
528 557
529 seq_putc(m, '\n'); 558 seq_putc(m, '\n');
530 print_batch_pool_stats(m, dev_priv); 559 print_batch_pool_stats(m, dev_priv);
531
532 mutex_unlock(&dev->struct_mutex); 560 mutex_unlock(&dev->struct_mutex);
533 561
534 mutex_lock(&dev->filelist_mutex); 562 mutex_lock(&dev->filelist_mutex);
563 print_context_stats(m, dev_priv);
535 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 564 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
536 struct file_stats stats; 565 struct file_stats stats;
537 struct task_struct *task; 566 struct task_struct *task;
@@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
607 for_each_intel_crtc(dev, crtc) { 636 for_each_intel_crtc(dev, crtc) {
608 const char pipe = pipe_name(crtc->pipe); 637 const char pipe = pipe_name(crtc->pipe);
609 const char plane = plane_name(crtc->plane); 638 const char plane = plane_name(crtc->plane);
610 struct intel_unpin_work *work; 639 struct intel_flip_work *work;
611 640
612 spin_lock_irq(&dev->event_lock); 641 spin_lock_irq(&dev->event_lock);
613 work = crtc->unpin_work; 642 work = crtc->flip_work;
614 if (work == NULL) { 643 if (work == NULL) {
615 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 644 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
616 pipe, plane); 645 pipe, plane);
617 } else { 646 } else {
647 u32 pending;
618 u32 addr; 648 u32 addr;
619 649
620 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 650 pending = atomic_read(&work->pending);
621 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 651 if (pending) {
652 seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
622 pipe, plane); 653 pipe, plane);
623 } else { 654 } else {
624 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 655 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@@ -638,11 +669,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
638 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 669 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
639 work->flip_queued_vblank, 670 work->flip_queued_vblank,
640 work->flip_ready_vblank, 671 work->flip_ready_vblank,
641 drm_crtc_vblank_count(&crtc->base)); 672 intel_crtc_get_vblank_counter(crtc));
642 if (work->enable_stall_check)
643 seq_puts(m, "Stall check enabled, ");
644 else
645 seq_puts(m, "Stall check waiting for page flip ioctl, ");
646 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 673 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
647 674
648 if (INTEL_INFO(dev)->gen >= 4) 675 if (INTEL_INFO(dev)->gen >= 4)
@@ -1281,6 +1308,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1281 } 1308 }
1282 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1309 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1283 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1310 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1311 seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
1284 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1312 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1285 seq_printf(m, "Render p-state ratio: %d\n", 1313 seq_printf(m, "Render p-state ratio: %d\n",
1286 (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8); 1314 (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1383,7 +1411,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
1383 seqno[id] = engine->get_seqno(engine); 1411 seqno[id] = engine->get_seqno(engine);
1384 } 1412 }
1385 1413
1386 i915_get_extra_instdone(dev, instdone); 1414 i915_get_extra_instdone(dev_priv, instdone);
1387 1415
1388 intel_runtime_pm_put(dev_priv); 1416 intel_runtime_pm_put(dev_priv);
1389 1417
@@ -1991,8 +2019,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
1991 struct drm_device *dev = node->minor->dev; 2019 struct drm_device *dev = node->minor->dev;
1992 struct drm_i915_private *dev_priv = dev->dev_private; 2020 struct drm_i915_private *dev_priv = dev->dev_private;
1993 struct intel_engine_cs *engine; 2021 struct intel_engine_cs *engine;
1994 struct intel_context *ctx; 2022 struct i915_gem_context *ctx;
1995 enum intel_engine_id id;
1996 int ret; 2023 int ret;
1997 2024
1998 ret = mutex_lock_interruptible(&dev->struct_mutex); 2025 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2000,32 +2027,36 @@ static int i915_context_status(struct seq_file *m, void *unused)
2000 return ret; 2027 return ret;
2001 2028
2002 list_for_each_entry(ctx, &dev_priv->context_list, link) { 2029 list_for_each_entry(ctx, &dev_priv->context_list, link) {
2003 if (!i915.enable_execlists && 2030 seq_printf(m, "HW context %u ", ctx->hw_id);
2004 ctx->legacy_hw_ctx.rcs_state == NULL) 2031 if (IS_ERR(ctx->file_priv)) {
2005 continue; 2032 seq_puts(m, "(deleted) ");
2006 2033 } else if (ctx->file_priv) {
2007 seq_puts(m, "HW context "); 2034 struct pid *pid = ctx->file_priv->file->pid;
2008 describe_ctx(m, ctx); 2035 struct task_struct *task;
2009 if (ctx == dev_priv->kernel_context)
2010 seq_printf(m, "(kernel context) ");
2011 2036
2012 if (i915.enable_execlists) { 2037 task = get_pid_task(pid, PIDTYPE_PID);
2013 seq_putc(m, '\n'); 2038 if (task) {
2014 for_each_engine_id(engine, dev_priv, id) { 2039 seq_printf(m, "(%s [%d]) ",
2015 struct drm_i915_gem_object *ctx_obj = 2040 task->comm, task->pid);
2016 ctx->engine[id].state; 2041 put_task_struct(task);
2017 struct intel_ringbuffer *ringbuf =
2018 ctx->engine[id].ringbuf;
2019
2020 seq_printf(m, "%s: ", engine->name);
2021 if (ctx_obj)
2022 describe_obj(m, ctx_obj);
2023 if (ringbuf)
2024 describe_ctx_ringbuf(m, ringbuf);
2025 seq_putc(m, '\n');
2026 } 2042 }
2027 } else { 2043 } else {
2028 describe_obj(m, ctx->legacy_hw_ctx.rcs_state); 2044 seq_puts(m, "(kernel) ");
2045 }
2046
2047 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
2048 seq_putc(m, '\n');
2049
2050 for_each_engine(engine, dev_priv) {
2051 struct intel_context *ce = &ctx->engine[engine->id];
2052
2053 seq_printf(m, "%s: ", engine->name);
2054 seq_putc(m, ce->initialised ? 'I' : 'i');
2055 if (ce->state)
2056 describe_obj(m, ce->state);
2057 if (ce->ringbuf)
2058 describe_ctx_ringbuf(m, ce->ringbuf);
2059 seq_putc(m, '\n');
2029 } 2060 }
2030 2061
2031 seq_putc(m, '\n'); 2062 seq_putc(m, '\n');
@@ -2037,24 +2068,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
2037} 2068}
2038 2069
2039static void i915_dump_lrc_obj(struct seq_file *m, 2070static void i915_dump_lrc_obj(struct seq_file *m,
2040 struct intel_context *ctx, 2071 struct i915_gem_context *ctx,
2041 struct intel_engine_cs *engine) 2072 struct intel_engine_cs *engine)
2042{ 2073{
2074 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
2043 struct page *page; 2075 struct page *page;
2044 uint32_t *reg_state; 2076 uint32_t *reg_state;
2045 int j; 2077 int j;
2046 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
2047 unsigned long ggtt_offset = 0; 2078 unsigned long ggtt_offset = 0;
2048 2079
2080 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
2081
2049 if (ctx_obj == NULL) { 2082 if (ctx_obj == NULL) {
2050 seq_printf(m, "Context on %s with no gem object\n", 2083 seq_puts(m, "\tNot allocated\n");
2051 engine->name);
2052 return; 2084 return;
2053 } 2085 }
2054 2086
2055 seq_printf(m, "CONTEXT: %s %u\n", engine->name,
2056 intel_execlists_ctx_id(ctx, engine));
2057
2058 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 2087 if (!i915_gem_obj_ggtt_bound(ctx_obj))
2059 seq_puts(m, "\tNot bound in GGTT\n"); 2088 seq_puts(m, "\tNot bound in GGTT\n");
2060 else 2089 else
@@ -2087,7 +2116,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
2087 struct drm_device *dev = node->minor->dev; 2116 struct drm_device *dev = node->minor->dev;
2088 struct drm_i915_private *dev_priv = dev->dev_private; 2117 struct drm_i915_private *dev_priv = dev->dev_private;
2089 struct intel_engine_cs *engine; 2118 struct intel_engine_cs *engine;
2090 struct intel_context *ctx; 2119 struct i915_gem_context *ctx;
2091 int ret; 2120 int ret;
2092 2121
2093 if (!i915.enable_execlists) { 2122 if (!i915.enable_execlists) {
@@ -2100,9 +2129,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
2100 return ret; 2129 return ret;
2101 2130
2102 list_for_each_entry(ctx, &dev_priv->context_list, link) 2131 list_for_each_entry(ctx, &dev_priv->context_list, link)
2103 if (ctx != dev_priv->kernel_context) 2132 for_each_engine(engine, dev_priv)
2104 for_each_engine(engine, dev_priv) 2133 i915_dump_lrc_obj(m, ctx, engine);
2105 i915_dump_lrc_obj(m, ctx, engine);
2106 2134
2107 mutex_unlock(&dev->struct_mutex); 2135 mutex_unlock(&dev->struct_mutex);
2108 2136
@@ -2173,8 +2201,8 @@ static int i915_execlists(struct seq_file *m, void *data)
2173 2201
2174 seq_printf(m, "\t%d requests in queue\n", count); 2202 seq_printf(m, "\t%d requests in queue\n", count);
2175 if (head_req) { 2203 if (head_req) {
2176 seq_printf(m, "\tHead request id: %u\n", 2204 seq_printf(m, "\tHead request context: %u\n",
2177 intel_execlists_ctx_id(head_req->ctx, engine)); 2205 head_req->ctx->hw_id);
2178 seq_printf(m, "\tHead request tail: %u\n", 2206 seq_printf(m, "\tHead request tail: %u\n",
2179 head_req->tail); 2207 head_req->tail);
2180 } 2208 }
@@ -2268,7 +2296,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
2268 2296
2269static int per_file_ctx(int id, void *ptr, void *data) 2297static int per_file_ctx(int id, void *ptr, void *data)
2270{ 2298{
2271 struct intel_context *ctx = ptr; 2299 struct i915_gem_context *ctx = ptr;
2272 struct seq_file *m = data; 2300 struct seq_file *m = data;
2273 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2301 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2274 2302
@@ -2313,12 +2341,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2313 struct drm_i915_private *dev_priv = dev->dev_private; 2341 struct drm_i915_private *dev_priv = dev->dev_private;
2314 struct intel_engine_cs *engine; 2342 struct intel_engine_cs *engine;
2315 2343
2316 if (INTEL_INFO(dev)->gen == 6) 2344 if (IS_GEN6(dev_priv))
2317 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2345 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2318 2346
2319 for_each_engine(engine, dev_priv) { 2347 for_each_engine(engine, dev_priv) {
2320 seq_printf(m, "%s\n", engine->name); 2348 seq_printf(m, "%s\n", engine->name);
2321 if (INTEL_INFO(dev)->gen == 7) 2349 if (IS_GEN7(dev_priv))
2322 seq_printf(m, "GFX_MODE: 0x%08x\n", 2350 seq_printf(m, "GFX_MODE: 0x%08x\n",
2323 I915_READ(RING_MODE_GEN7(engine))); 2351 I915_READ(RING_MODE_GEN7(engine)));
2324 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2352 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@@ -2365,16 +2393,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
2365 task = get_pid_task(file->pid, PIDTYPE_PID); 2393 task = get_pid_task(file->pid, PIDTYPE_PID);
2366 if (!task) { 2394 if (!task) {
2367 ret = -ESRCH; 2395 ret = -ESRCH;
2368 goto out_put; 2396 goto out_unlock;
2369 } 2397 }
2370 seq_printf(m, "\nproc: %s\n", task->comm); 2398 seq_printf(m, "\nproc: %s\n", task->comm);
2371 put_task_struct(task); 2399 put_task_struct(task);
2372 idr_for_each(&file_priv->context_idr, per_file_ctx, 2400 idr_for_each(&file_priv->context_idr, per_file_ctx,
2373 (void *)(unsigned long)m); 2401 (void *)(unsigned long)m);
2374 } 2402 }
2403out_unlock:
2375 mutex_unlock(&dev->filelist_mutex); 2404 mutex_unlock(&dev->filelist_mutex);
2376 2405
2377out_put:
2378 intel_runtime_pm_put(dev_priv); 2406 intel_runtime_pm_put(dev_priv);
2379 mutex_unlock(&dev->struct_mutex); 2407 mutex_unlock(&dev->struct_mutex);
2380 2408
@@ -2509,6 +2537,7 @@ static void i915_guc_client_info(struct seq_file *m,
2509 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", 2537 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2510 client->wq_size, client->wq_offset, client->wq_tail); 2538 client->wq_size, client->wq_offset, client->wq_tail);
2511 2539
2540 seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
2512 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); 2541 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
2513 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); 2542 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
2514 seq_printf(m, "\tLast submission result: %d\n", client->retcode); 2543 seq_printf(m, "\tLast submission result: %d\n", client->retcode);
@@ -3168,7 +3197,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
3168 enum intel_engine_id id; 3197 enum intel_engine_id id;
3169 int j, ret; 3198 int j, ret;
3170 3199
3171 if (!i915_semaphore_is_enabled(dev)) { 3200 if (!i915_semaphore_is_enabled(dev_priv)) {
3172 seq_puts(m, "Semaphores are disabled\n"); 3201 seq_puts(m, "Semaphores are disabled\n");
3173 return 0; 3202 return 0;
3174 } 3203 }
@@ -4769,7 +4798,7 @@ i915_wedged_set(void *data, u64 val)
4769 4798
4770 intel_runtime_pm_get(dev_priv); 4799 intel_runtime_pm_get(dev_priv);
4771 4800
4772 i915_handle_error(dev, val, 4801 i915_handle_error(dev_priv, val,
4773 "Manually setting wedged to %llu", val); 4802 "Manually setting wedged to %llu", val);
4774 4803
4775 intel_runtime_pm_put(dev_priv); 4804 intel_runtime_pm_put(dev_priv);
@@ -4919,7 +4948,7 @@ i915_drop_caches_set(void *data, u64 val)
4919 } 4948 }
4920 4949
4921 if (val & (DROP_RETIRE | DROP_ACTIVE)) 4950 if (val & (DROP_RETIRE | DROP_ACTIVE))
4922 i915_gem_retire_requests(dev); 4951 i915_gem_retire_requests(dev_priv);
4923 4952
4924 if (val & DROP_BOUND) 4953 if (val & DROP_BOUND)
4925 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4954 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -4993,7 +5022,7 @@ i915_max_freq_set(void *data, u64 val)
4993 5022
4994 dev_priv->rps.max_freq_softlimit = val; 5023 dev_priv->rps.max_freq_softlimit = val;
4995 5024
4996 intel_set_rps(dev, val); 5025 intel_set_rps(dev_priv, val);
4997 5026
4998 mutex_unlock(&dev_priv->rps.hw_lock); 5027 mutex_unlock(&dev_priv->rps.hw_lock);
4999 5028
@@ -5060,7 +5089,7 @@ i915_min_freq_set(void *data, u64 val)
5060 5089
5061 dev_priv->rps.min_freq_softlimit = val; 5090 dev_priv->rps.min_freq_softlimit = val;
5062 5091
5063 intel_set_rps(dev, val); 5092 intel_set_rps(dev_priv, val);
5064 5093
5065 mutex_unlock(&dev_priv->rps.hw_lock); 5094 mutex_unlock(&dev_priv->rps.hw_lock);
5066 5095
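
[Editor's note: the debugfs rework keys everything off the new i915_gem_context and its stable hw_id, walking contexts through each open file's idr rather than driver-global bookkeeping. The walk idiom that print_context_stats() and per_file_ctx() share, reduced to a sketch:]

	static int show_ctx(int id, void *ptr, void *data)
	{
		struct i915_gem_context *ctx = ptr;
		struct seq_file *m = data;

		seq_printf(m, "context hw_id=%u\n", ctx->hw_id);
		return 0;	/* non-zero would abort the walk */
	}

	/* with dev->filelist_mutex held, for each open drm_file *file: */
	idr_for_each(&file_priv->context_idr, show_ctx, m);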
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b3198fcd0536..07edaed9d5a2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
186 value = 1; 186 value = 1;
187 break; 187 break;
188 case I915_PARAM_HAS_SEMAPHORES: 188 case I915_PARAM_HAS_SEMAPHORES:
189 value = i915_semaphore_is_enabled(dev); 189 value = i915_semaphore_is_enabled(dev_priv);
190 break; 190 break;
191 case I915_PARAM_HAS_PRIME_VMAP_FLUSH: 191 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
192 value = 1; 192 value = 1;
@@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
204 value = 1; 204 value = 1;
205 break; 205 break;
206 case I915_PARAM_CMD_PARSER_VERSION: 206 case I915_PARAM_CMD_PARSER_VERSION:
207 value = i915_cmd_parser_get_version(); 207 value = i915_cmd_parser_get_version(dev_priv);
208 break; 208 break;
209 case I915_PARAM_HAS_COHERENT_PHYS_GTT: 209 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
210 value = 1; 210 value = 1;
@@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
223 return -ENODEV; 223 return -ENODEV;
224 break; 224 break;
225 case I915_PARAM_HAS_GPU_RESET: 225 case I915_PARAM_HAS_GPU_RESET:
226 value = i915.enable_hangcheck && 226 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
227 intel_has_gpu_reset(dev);
228 break; 227 break;
229 case I915_PARAM_HAS_RESOURCE_STREAMER: 228 case I915_PARAM_HAS_RESOURCE_STREAMER:
230 value = HAS_RESOURCE_STREAMER(dev); 229 value = HAS_RESOURCE_STREAMER(dev);
@@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
425 .can_switch = i915_switcheroo_can_switch, 424 .can_switch = i915_switcheroo_can_switch,
426}; 425};
427 426
427static void i915_gem_fini(struct drm_device *dev)
428{
429 struct drm_i915_private *dev_priv = to_i915(dev);
430
431 /*
431 * Neither the BIOS, ourselves, nor any other kernel
433 * expects the system to be in execlists mode on startup,
434 * so we need to reset the GPU back to legacy mode. And the only
435 * known way to disable logical contexts is through a GPU reset.
436 *
437 * So in order to leave the system in a known default configuration,
438 * always reset the GPU upon unload. Afterwards we clean up the
439 * GEM state tracking, flushing off the requests and leaving the
440 * system in a known idle state.
441 *
442 * Note that it is of the utmost importance that the GPU is idle and
443 * all stray writes are flushed *before* we dismantle the backing
444 * storage for the pinned objects.
445 *
446 * However, since we are uncertain that resetting the GPU on older
447 * machines is a good idea, we don't, just in case it leaves the
448 * machine in an unusable condition.
449 */
450 if (HAS_HW_CONTEXTS(dev)) {
451 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
452 WARN_ON(reset && reset != -ENODEV);
453 }
454
455 mutex_lock(&dev->struct_mutex);
456 i915_gem_reset(dev);
457 i915_gem_cleanup_engines(dev);
458 i915_gem_context_fini(dev);
459 mutex_unlock(&dev->struct_mutex);
460
461 WARN_ON(!list_empty(&to_i915(dev)->context_list));
462}
463
428static int i915_load_modeset_init(struct drm_device *dev) 464static int i915_load_modeset_init(struct drm_device *dev)
429{ 465{
430 struct drm_i915_private *dev_priv = dev->dev_private; 466 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
454 if (ret) 490 if (ret)
455 goto cleanup_vga_client; 491 goto cleanup_vga_client;
456 492
493 /* must happen before intel_power_domains_init_hw() on VLV/CHV */
494 intel_update_rawclk(dev_priv);
495
457 intel_power_domains_init_hw(dev_priv, false); 496 intel_power_domains_init_hw(dev_priv, false);
458 497
459 intel_csr_ucode_init(dev_priv); 498 intel_csr_ucode_init(dev_priv);
@@ -468,7 +507,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
468 * working irqs for e.g. gmbus and dp aux transfers. */ 507 * working irqs for e.g. gmbus and dp aux transfers. */
469 intel_modeset_init(dev); 508 intel_modeset_init(dev);
470 509
471 intel_guc_ucode_init(dev); 510 intel_guc_init(dev);
472 511
473 ret = i915_gem_init(dev); 512 ret = i915_gem_init(dev);
474 if (ret) 513 if (ret)
@@ -503,12 +542,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
503 return 0; 542 return 0;
504 543
505cleanup_gem: 544cleanup_gem:
506 mutex_lock(&dev->struct_mutex); 545 i915_gem_fini(dev);
507 i915_gem_cleanup_engines(dev);
508 i915_gem_context_fini(dev);
509 mutex_unlock(&dev->struct_mutex);
510cleanup_irq: 546cleanup_irq:
511 intel_guc_ucode_fini(dev); 547 intel_guc_fini(dev);
512 drm_irq_uninstall(dev); 548 drm_irq_uninstall(dev);
513 intel_teardown_gmbus(dev); 549 intel_teardown_gmbus(dev);
514cleanup_csr: 550cleanup_csr:
@@ -850,7 +886,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
850 DRM_INFO("Display disabled (module parameter)\n"); 886 DRM_INFO("Display disabled (module parameter)\n");
851 info->num_pipes = 0; 887 info->num_pipes = 0;
852 } else if (info->num_pipes > 0 && 888 } else if (info->num_pipes > 0 &&
853 (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && 889 (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
854 HAS_PCH_SPLIT(dev)) { 890 HAS_PCH_SPLIT(dev)) {
855 u32 fuse_strap = I915_READ(FUSE_STRAP); 891 u32 fuse_strap = I915_READ(FUSE_STRAP);
856 u32 sfuse_strap = I915_READ(SFUSE_STRAP); 892 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -874,7 +910,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
874 DRM_INFO("PipeC fused off\n"); 910 DRM_INFO("PipeC fused off\n");
875 info->num_pipes -= 1; 911 info->num_pipes -= 1;
876 } 912 }
877 } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { 913 } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
878 u32 dfsm = I915_READ(SKL_DFSM); 914 u32 dfsm = I915_READ(SKL_DFSM);
879 u8 disabled_mask = 0; 915 u8 disabled_mask = 0;
880 bool invalid; 916 bool invalid;
@@ -915,9 +951,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
915 else if (INTEL_INFO(dev)->gen >= 9) 951 else if (INTEL_INFO(dev)->gen >= 9)
916 gen9_sseu_info_init(dev); 952 gen9_sseu_info_init(dev);
917 953
918 /* Snooping is broken on BXT A stepping. */
919 info->has_snoop = !info->has_llc; 954 info->has_snoop = !info->has_llc;
920 info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1); 955
956 /* Snooping is broken on BXT A stepping. */
957 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
958 info->has_snoop = false;
921 959
922 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); 960 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
923 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); 961 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
@@ -930,6 +968,20 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
930 info->has_subslice_pg ? "y" : "n"); 968 info->has_subslice_pg ? "y" : "n");
931 DRM_DEBUG_DRIVER("has EU power gating: %s\n", 969 DRM_DEBUG_DRIVER("has EU power gating: %s\n",
932 info->has_eu_pg ? "y" : "n"); 970 info->has_eu_pg ? "y" : "n");
971
972 i915.enable_execlists =
973 intel_sanitize_enable_execlists(dev_priv,
974 i915.enable_execlists);
975
976 /*
977 * i915.enable_ppgtt is read-only, so do an early pass to validate the
978 * user's requested state against the hardware/driver capabilities. We
979 * do this now so that we can print out any log messages once rather
980 * than every time we check intel_enable_ppgtt().
981 */
982 i915.enable_ppgtt =
983 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
984 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
933} 985}
934 986
935static void intel_init_dpio(struct drm_i915_private *dev_priv) 987static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -1020,6 +1072,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
1020 memcpy(device_info, info, sizeof(dev_priv->info)); 1072 memcpy(device_info, info, sizeof(dev_priv->info));
1021 device_info->device_id = dev->pdev->device; 1073 device_info->device_id = dev->pdev->device;
1022 1074
1075 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
1076 device_info->gen_mask = BIT(device_info->gen - 1);
1077
1023 spin_lock_init(&dev_priv->irq_lock); 1078 spin_lock_init(&dev_priv->irq_lock);
1024 spin_lock_init(&dev_priv->gpu_error.lock); 1079 spin_lock_init(&dev_priv->gpu_error.lock);
1025 mutex_init(&dev_priv->backlight_lock); 1080 mutex_init(&dev_priv->backlight_lock);
@@ -1137,7 +1192,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
1137 if (ret < 0) 1192 if (ret < 0)
1138 goto put_bridge; 1193 goto put_bridge;
1139 1194
1140 intel_uncore_init(dev); 1195 intel_uncore_init(dev_priv);
1141 1196
1142 return 0; 1197 return 0;
1143 1198
@@ -1155,7 +1210,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
1155{ 1210{
1156 struct drm_device *dev = dev_priv->dev; 1211 struct drm_device *dev = dev_priv->dev;
1157 1212
1158 intel_uncore_fini(dev); 1213 intel_uncore_fini(dev_priv);
1159 i915_mmio_cleanup(dev); 1214 i915_mmio_cleanup(dev);
1160 pci_dev_put(dev_priv->bridge_dev); 1215 pci_dev_put(dev_priv->bridge_dev);
1161} 1216}
@@ -1206,8 +1261,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1206 pci_set_master(dev->pdev); 1261 pci_set_master(dev->pdev);
1207 1262
1208 /* overlay on gen2 is broken and can't address above 1G */ 1263 /* overlay on gen2 is broken and can't address above 1G */
1209 if (IS_GEN2(dev)) 1264 if (IS_GEN2(dev)) {
1210 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1265 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1266 if (ret) {
1267 DRM_ERROR("failed to set DMA mask\n");
1268
1269 goto out_ggtt;
1270 }
1271 }
1272
1211 1273
1212 /* 965GM sometimes incorrectly writes to hardware status page (HWS) 1274 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1213 * using 32bit addressing, overwriting memory if HWS is located 1275 * using 32bit addressing, overwriting memory if HWS is located
@@ -1217,8 +1279,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1217 * behaviour if any general state is accessed within a page above 4GB, 1279 * behaviour if any general state is accessed within a page above 4GB,
1218 * which also needs to be handled carefully. 1280 * which also needs to be handled carefully.
1219 */ 1281 */
1220 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1282 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
1221 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1283 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1284
1285 if (ret) {
1286 DRM_ERROR("failed to set DMA mask\n");
1287
1288 goto out_ggtt;
1289 }
1290 }
1222 1291
1223 aperture_size = ggtt->mappable_end; 1292 aperture_size = ggtt->mappable_end;
1224 1293
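
[Editor's note: dma_set_coherent_mask() can fail, and probe now propagates that instead of silently carrying on. DMA_BIT_MASK(n) builds an n-bit address mask, so the 30-bit mask caps coherent allocations at 1 GiB to dodge the broken gen2 overlay addressing mentioned in the comment. The tightened shape, in isolation:]

	ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
	if (ret) {
		DRM_ERROR("failed to set DMA mask\n");
		goto out_ggtt;	/* unwind GGTT setup done earlier in probe */
	}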
@@ -1236,9 +1305,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1236 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, 1305 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1237 PM_QOS_DEFAULT_VALUE); 1306 PM_QOS_DEFAULT_VALUE);
1238 1307
1239 intel_uncore_sanitize(dev); 1308 intel_uncore_sanitize(dev_priv);
1240 1309
1241 intel_opregion_setup(dev); 1310 intel_opregion_setup(dev_priv);
1242 1311
1243 i915_gem_load_init_fences(dev_priv); 1312 i915_gem_load_init_fences(dev_priv);
1244 1313
@@ -1300,14 +1369,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
1300 * Notify a valid surface after modesetting, 1369 * Notify a valid surface after modesetting,
1301 * when running inside a VM. 1370 * when running inside a VM.
1302 */ 1371 */
1303 if (intel_vgpu_active(dev)) 1372 if (intel_vgpu_active(dev_priv))
1304 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); 1373 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1305 1374
1306 i915_setup_sysfs(dev); 1375 i915_setup_sysfs(dev);
1307 1376
1308 if (INTEL_INFO(dev_priv)->num_pipes) { 1377 if (INTEL_INFO(dev_priv)->num_pipes) {
1309 /* Must be done after probing outputs */ 1378 /* Must be done after probing outputs */
1310 intel_opregion_init(dev); 1379 intel_opregion_register(dev_priv);
1311 acpi_video_register(); 1380 acpi_video_register();
1312 } 1381 }
1313 1382
@@ -1326,7 +1395,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1326 i915_audio_component_cleanup(dev_priv); 1395 i915_audio_component_cleanup(dev_priv);
1327 intel_gpu_ips_teardown(); 1396 intel_gpu_ips_teardown();
1328 acpi_video_unregister(); 1397 acpi_video_unregister();
1329 intel_opregion_fini(dev_priv->dev); 1398 intel_opregion_unregister(dev_priv);
1330 i915_teardown_sysfs(dev_priv->dev); 1399 i915_teardown_sysfs(dev_priv->dev);
1331 i915_gem_shrinker_cleanup(dev_priv); 1400 i915_gem_shrinker_cleanup(dev_priv);
1332} 1401}
@@ -1458,11 +1527,8 @@ int i915_driver_unload(struct drm_device *dev)
1458 /* Flush any outstanding unpin_work. */ 1527 /* Flush any outstanding unpin_work. */
1459 flush_workqueue(dev_priv->wq); 1528 flush_workqueue(dev_priv->wq);
1460 1529
1461 intel_guc_ucode_fini(dev); 1530 intel_guc_fini(dev);
1462 mutex_lock(&dev->struct_mutex); 1531 i915_gem_fini(dev);
1463 i915_gem_cleanup_engines(dev);
1464 i915_gem_context_fini(dev);
1465 mutex_unlock(&dev->struct_mutex);
1466 intel_fbc_cleanup_cfb(dev_priv); 1532 intel_fbc_cleanup_cfb(dev_priv);
1467 1533
1468 intel_power_domains_fini(dev_priv); 1534 intel_power_domains_fini(dev_priv);
@@ -1570,15 +1636,15 @@ const struct drm_ioctl_desc i915_ioctls[] = {
1570 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 1636 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1571 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), 1637 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
1572 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 1638 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
1573 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), 1639 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
1574 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), 1640 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
1575 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), 1641 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
1576 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), 1642 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
1577 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 1643 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1578 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 1644 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1579 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), 1645 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
1580 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), 1646 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
1581 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), 1647 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
1582 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), 1648 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
1583 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), 1649 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
1584 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), 1650 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f313b4d8344f..872c60608dbd 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,11 +35,9 @@
35#include "i915_trace.h" 35#include "i915_trace.h"
36#include "intel_drv.h" 36#include "intel_drv.h"
37 37
38#include <linux/apple-gmux.h>
39#include <linux/console.h> 38#include <linux/console.h>
40#include <linux/module.h> 39#include <linux/module.h>
41#include <linux/pm_runtime.h> 40#include <linux/pm_runtime.h>
42#include <linux/vgaarb.h>
43#include <linux/vga_switcheroo.h> 41#include <linux/vga_switcheroo.h>
44#include <drm/drm_crtc_helper.h> 42#include <drm/drm_crtc_helper.h>
45 43
@@ -300,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = {
300static const struct intel_device_info intel_broadwell_d_info = { 298static const struct intel_device_info intel_broadwell_d_info = {
301 BDW_FEATURES, 299 BDW_FEATURES,
302 .gen = 8, 300 .gen = 8,
301 .is_broadwell = 1,
303}; 302};
304 303
305static const struct intel_device_info intel_broadwell_m_info = { 304static const struct intel_device_info intel_broadwell_m_info = {
306 BDW_FEATURES, 305 BDW_FEATURES,
307 .gen = 8, .is_mobile = 1, 306 .gen = 8, .is_mobile = 1,
307 .is_broadwell = 1,
308}; 308};
309 309
310static const struct intel_device_info intel_broadwell_gt3d_info = { 310static const struct intel_device_info intel_broadwell_gt3d_info = {
311 BDW_FEATURES, 311 BDW_FEATURES,
312 .gen = 8, 312 .gen = 8,
313 .is_broadwell = 1,
313 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 314 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
314}; 315};
315 316
316static const struct intel_device_info intel_broadwell_gt3m_info = { 317static const struct intel_device_info intel_broadwell_gt3m_info = {
317 BDW_FEATURES, 318 BDW_FEATURES,
318 .gen = 8, .is_mobile = 1, 319 .gen = 8, .is_mobile = 1,
320 .is_broadwell = 1,
319 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 321 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
320}; 322};
321 323
@@ -530,9 +532,9 @@ void intel_detect_pch(struct drm_device *dev)
530 pci_dev_put(pch); 532 pci_dev_put(pch);
531} 533}
532 534
533bool i915_semaphore_is_enabled(struct drm_device *dev) 535bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
534{ 536{
535 if (INTEL_INFO(dev)->gen < 6) 537 if (INTEL_GEN(dev_priv) < 6)
536 return false; 538 return false;
537 539
538 if (i915.semaphores >= 0) 540 if (i915.semaphores >= 0)
@@ -542,13 +544,9 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
542 if (i915.enable_execlists) 544 if (i915.enable_execlists)
543 return false; 545 return false;
544 546
545 /* Until we get further testing... */
546 if (IS_GEN8(dev))
547 return false;
548
549#ifdef CONFIG_INTEL_IOMMU 547#ifdef CONFIG_INTEL_IOMMU
550 /* Enable semaphores on SNB when IO remapping is off */ 548 /* Enable semaphores on SNB when IO remapping is off */
551 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 549 if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
552 return false; 550 return false;
553#endif 551#endif
554 552
@@ -610,7 +608,7 @@ static int i915_drm_suspend(struct drm_device *dev)
610 608
611 intel_guc_suspend(dev); 609 intel_guc_suspend(dev);
612 610
613 intel_suspend_gt_powersave(dev); 611 intel_suspend_gt_powersave(dev_priv);
614 612
615 intel_display_suspend(dev); 613 intel_display_suspend(dev);
616 614
@@ -628,10 +626,10 @@ static int i915_drm_suspend(struct drm_device *dev)
628 i915_save_state(dev); 626 i915_save_state(dev);
629 627
630 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; 628 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
631 intel_opregion_notify_adapter(dev, opregion_target_state); 629 intel_opregion_notify_adapter(dev_priv, opregion_target_state);
632 630
633 intel_uncore_forcewake_reset(dev, false); 631 intel_uncore_forcewake_reset(dev_priv, false);
634 intel_opregion_fini(dev); 632 intel_opregion_unregister(dev_priv);
635 633
636 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); 634 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
637 635
@@ -749,7 +747,7 @@ static int i915_drm_resume(struct drm_device *dev)
749 mutex_unlock(&dev->struct_mutex); 747 mutex_unlock(&dev->struct_mutex);
750 748
751 i915_restore_state(dev); 749 i915_restore_state(dev);
752 intel_opregion_setup(dev); 750 intel_opregion_setup(dev_priv);
753 751
754 intel_init_pch_refclk(dev); 752 intel_init_pch_refclk(dev);
755 drm_mode_config_reset(dev); 753 drm_mode_config_reset(dev);
@@ -777,7 +775,7 @@ static int i915_drm_resume(struct drm_device *dev)
777 775
778 spin_lock_irq(&dev_priv->irq_lock); 776 spin_lock_irq(&dev_priv->irq_lock);
779 if (dev_priv->display.hpd_irq_setup) 777 if (dev_priv->display.hpd_irq_setup)
780 dev_priv->display.hpd_irq_setup(dev); 778 dev_priv->display.hpd_irq_setup(dev_priv);
781 spin_unlock_irq(&dev_priv->irq_lock); 779 spin_unlock_irq(&dev_priv->irq_lock);
782 780
783 intel_dp_mst_resume(dev); 781 intel_dp_mst_resume(dev);
@@ -794,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
794 /* Config may have changed between suspend and resume */ 792 /* Config may have changed between suspend and resume */
795 drm_helper_hpd_irq_event(dev); 793 drm_helper_hpd_irq_event(dev);
796 794
797 intel_opregion_init(dev); 795 intel_opregion_register(dev_priv);
798 796
799 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); 797 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
800 798
@@ -802,7 +800,7 @@ static int i915_drm_resume(struct drm_device *dev)
802 dev_priv->modeset_restore = MODESET_DONE; 800 dev_priv->modeset_restore = MODESET_DONE;
803 mutex_unlock(&dev_priv->modeset_restore_lock); 801 mutex_unlock(&dev_priv->modeset_restore_lock);
804 802
805 intel_opregion_notify_adapter(dev, PCI_D0); 803 intel_opregion_notify_adapter(dev_priv, PCI_D0);
806 804
807 drm_kms_helper_poll_enable(dev); 805 drm_kms_helper_poll_enable(dev);
808 806
@@ -870,9 +868,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
870 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", 868 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
871 ret); 869 ret);
872 870
873 intel_uncore_early_sanitize(dev, true); 871 intel_uncore_early_sanitize(dev_priv, true);
874 872
875 if (IS_BROXTON(dev)) { 873 if (IS_BROXTON(dev_priv)) {
876 if (!dev_priv->suspended_to_idle) 874 if (!dev_priv->suspended_to_idle)
877 gen9_sanitize_dc_state(dev_priv); 875 gen9_sanitize_dc_state(dev_priv);
878 bxt_disable_dc9(dev_priv); 876 bxt_disable_dc9(dev_priv);
@@ -880,7 +878,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
880 hsw_disable_pc8(dev_priv); 878 hsw_disable_pc8(dev_priv);
881 } 879 }
882 880
883 intel_uncore_sanitize(dev); 881 intel_uncore_sanitize(dev_priv);
884 882
885 if (IS_BROXTON(dev_priv) || 883 if (IS_BROXTON(dev_priv) ||
886 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) 884 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@ -923,14 +921,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
923 * - re-init interrupt state 921 * - re-init interrupt state
924 * - re-init display 922 * - re-init display
925 */ 923 */
926int i915_reset(struct drm_device *dev) 924int i915_reset(struct drm_i915_private *dev_priv)
927{ 925{
928 struct drm_i915_private *dev_priv = dev->dev_private; 926 struct drm_device *dev = dev_priv->dev;
929 struct i915_gpu_error *error = &dev_priv->gpu_error; 927 struct i915_gpu_error *error = &dev_priv->gpu_error;
930 unsigned reset_counter; 928 unsigned reset_counter;
931 int ret; 929 int ret;
932 930
933 intel_reset_gt_powersave(dev); 931 intel_reset_gt_powersave(dev_priv);
934 932
935 mutex_lock(&dev->struct_mutex); 933 mutex_lock(&dev->struct_mutex);
936 934
@@ -946,7 +944,7 @@ int i915_reset(struct drm_device *dev)
946 944
947 i915_gem_reset(dev); 945 i915_gem_reset(dev);
948 946
949 ret = intel_gpu_reset(dev, ALL_ENGINES); 947 ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
950 948
951 /* Also reset the gpu hangman. */ 949 /* Also reset the gpu hangman. */
952 if (error->stop_rings != 0) { 950 if (error->stop_rings != 0) {
@@ -1001,7 +999,7 @@ int i915_reset(struct drm_device *dev)
1001 * of re-init after reset. 999 * of re-init after reset.
1002 */ 1000 */
1003 if (INTEL_INFO(dev)->gen > 5) 1001 if (INTEL_INFO(dev)->gen > 5)
1004 intel_enable_gt_powersave(dev); 1002 intel_enable_gt_powersave(dev_priv);
1005 1003
1006 return 0; 1004 return 0;
1007 1005
@@ -1030,13 +1028,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1030 if (PCI_FUNC(pdev->devfn)) 1028 if (PCI_FUNC(pdev->devfn))
1031 return -ENODEV; 1029 return -ENODEV;
1032 1030
1033 /* 1031 if (vga_switcheroo_client_probe_defer(pdev))
1034 * apple-gmux is needed on dual GPU MacBook Pro
1035 * to probe the panel if we're the inactive GPU.
1036 */
1037 if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
1038 apple_gmux_present() && pdev != vga_default_device() &&
1039 !vga_switcheroo_handler_flags())
1040 return -EPROBE_DEFER; 1032 return -EPROBE_DEFER;
1041 1033
1042 return drm_get_pci_dev(pdev, ent, &driver); 1034 return drm_get_pci_dev(pdev, ent, &driver);
@@ -1115,6 +1107,49 @@ static int i915_pm_resume(struct device *dev)
1115 return i915_drm_resume(drm_dev); 1107 return i915_drm_resume(drm_dev);
1116} 1108}
1117 1109
1110/* freeze: before creating the hibernation_image */
1111static int i915_pm_freeze(struct device *dev)
1112{
1113 return i915_pm_suspend(dev);
1114}
1115
1116static int i915_pm_freeze_late(struct device *dev)
1117{
1118 int ret;
1119
1120 ret = i915_pm_suspend_late(dev);
1121 if (ret)
1122 return ret;
1123
1124 ret = i915_gem_freeze_late(dev_to_i915(dev));
1125 if (ret)
1126 return ret;
1127
1128 return 0;
1129}
1130
1131/* thaw: called after creating the hibernation image, but before turning off. */
1132static int i915_pm_thaw_early(struct device *dev)
1133{
1134 return i915_pm_resume_early(dev);
1135}
1136
1137static int i915_pm_thaw(struct device *dev)
1138{
1139 return i915_pm_resume(dev);
1140}
1141
1142/* restore: called after loading the hibernation image. */
1143static int i915_pm_restore_early(struct device *dev)
1144{
1145 return i915_pm_resume_early(dev);
1146}
1147
1148static int i915_pm_restore(struct device *dev)
1149{
1150 return i915_pm_resume(dev);
1151}
1152
1118/* 1153/*
1119 * Save all Gunit registers that may be lost after a D3 and a subsequent 1154 * Save all Gunit registers that may be lost after a D3 and a subsequent
1120 * S0i[R123] transition. The list of registers needing a save/restore is 1155 * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1478,7 +1513,7 @@ static int intel_runtime_suspend(struct device *device)
1478 struct drm_i915_private *dev_priv = dev->dev_private; 1513 struct drm_i915_private *dev_priv = dev->dev_private;
1479 int ret; 1514 int ret;
1480 1515
1481 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) 1516 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
1482 return -ENODEV; 1517 return -ENODEV;
1483 1518
1484 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 1519 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1517,7 +1552,7 @@ static int intel_runtime_suspend(struct device *device)
1517 1552
1518 intel_guc_suspend(dev); 1553 intel_guc_suspend(dev);
1519 1554
1520 intel_suspend_gt_powersave(dev); 1555 intel_suspend_gt_powersave(dev_priv);
1521 intel_runtime_pm_disable_interrupts(dev_priv); 1556 intel_runtime_pm_disable_interrupts(dev_priv);
1522 1557
1523 ret = 0; 1558 ret = 0;
@@ -1539,7 +1574,7 @@ static int intel_runtime_suspend(struct device *device)
1539 return ret; 1574 return ret;
1540 } 1575 }
1541 1576
1542 intel_uncore_forcewake_reset(dev, false); 1577 intel_uncore_forcewake_reset(dev_priv, false);
1543 1578
1544 enable_rpm_wakeref_asserts(dev_priv); 1579 enable_rpm_wakeref_asserts(dev_priv);
1545 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 1580 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -1553,14 +1588,14 @@ static int intel_runtime_suspend(struct device *device)
1553 * FIXME: We really should find a document that references the arguments 1588 * FIXME: We really should find a document that references the arguments
1554 * used below! 1589 * used below!
1555 */ 1590 */
1556 if (IS_BROADWELL(dev)) { 1591 if (IS_BROADWELL(dev_priv)) {
1557 /* 1592 /*
1558 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 1593 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1559 * being detected, and the call we do at intel_runtime_resume() 1594 * being detected, and the call we do at intel_runtime_resume()
1560 * won't be able to restore them. Since PCI_D3hot matches the 1595 * won't be able to restore them. Since PCI_D3hot matches the
1561 * actual specification and appears to be working, use it. 1596 * actual specification and appears to be working, use it.
1562 */ 1597 */
1563 intel_opregion_notify_adapter(dev, PCI_D3hot); 1598 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
1564 } else { 1599 } else {
1565 /* 1600 /*
1566 * current versions of firmware which depend on this opregion 1601 * current versions of firmware which depend on this opregion
@@ -1569,7 +1604,7 @@ static int intel_runtime_suspend(struct device *device)
1569 * to distinguish it from notifications that might be sent via 1604 * to distinguish it from notifications that might be sent via
1570 * the suspend path. 1605 * the suspend path.
1571 */ 1606 */
1572 intel_opregion_notify_adapter(dev, PCI_D1); 1607 intel_opregion_notify_adapter(dev_priv, PCI_D1);
1573 } 1608 }
1574 1609
1575 assert_forcewakes_inactive(dev_priv); 1610 assert_forcewakes_inactive(dev_priv);
@@ -1593,7 +1628,7 @@ static int intel_runtime_resume(struct device *device)
1593 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 1628 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
1594 disable_rpm_wakeref_asserts(dev_priv); 1629 disable_rpm_wakeref_asserts(dev_priv);
1595 1630
1596 intel_opregion_notify_adapter(dev, PCI_D0); 1631 intel_opregion_notify_adapter(dev_priv, PCI_D0);
1597 dev_priv->pm.suspended = false; 1632 dev_priv->pm.suspended = false;
1598 if (intel_uncore_unclaimed_mmio(dev_priv)) 1633 if (intel_uncore_unclaimed_mmio(dev_priv))
1599 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); 1634 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -1620,7 +1655,7 @@ static int intel_runtime_resume(struct device *device)
1620 * we can do is to hope that things will still work (and disable RPM). 1655 * we can do is to hope that things will still work (and disable RPM).
1621 */ 1656 */
1622 i915_gem_init_swizzling(dev); 1657 i915_gem_init_swizzling(dev);
1623 gen6_update_ring_freq(dev); 1658 gen6_update_ring_freq(dev_priv);
1624 1659
1625 intel_runtime_pm_enable_interrupts(dev_priv); 1660 intel_runtime_pm_enable_interrupts(dev_priv);
1626 1661
@@ -1632,7 +1667,7 @@ static int intel_runtime_resume(struct device *device)
1632 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 1667 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1633 intel_hpd_init(dev_priv); 1668 intel_hpd_init(dev_priv);
1634 1669
1635 intel_enable_gt_powersave(dev); 1670 intel_enable_gt_powersave(dev_priv);
1636 1671
1637 enable_rpm_wakeref_asserts(dev_priv); 1672 enable_rpm_wakeref_asserts(dev_priv);
1638 1673
@@ -1669,14 +1704,14 @@ static const struct dev_pm_ops i915_pm_ops = {
1669 * @restore, @restore_early : called after rebooting and restoring the 1704 * @restore, @restore_early : called after rebooting and restoring the
1670 * hibernation image [PMSG_RESTORE] 1705 * hibernation image [PMSG_RESTORE]
1671 */ 1706 */
1672 .freeze = i915_pm_suspend, 1707 .freeze = i915_pm_freeze,
1673 .freeze_late = i915_pm_suspend_late, 1708 .freeze_late = i915_pm_freeze_late,
1674 .thaw_early = i915_pm_resume_early, 1709 .thaw_early = i915_pm_thaw_early,
1675 .thaw = i915_pm_resume, 1710 .thaw = i915_pm_thaw,
1676 .poweroff = i915_pm_suspend, 1711 .poweroff = i915_pm_suspend,
1677 .poweroff_late = i915_pm_poweroff_late, 1712 .poweroff_late = i915_pm_poweroff_late,
1678 .restore_early = i915_pm_resume_early, 1713 .restore_early = i915_pm_restore_early,
1679 .restore = i915_pm_resume, 1714 .restore = i915_pm_restore,
1680 1715
1681 /* S0ix (via runtime suspend) event handlers */ 1716 /* S0ix (via runtime suspend) event handlers */
1682 .runtime_suspend = intel_runtime_suspend, 1717 .runtime_suspend = intel_runtime_suspend,
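Putting the new hooks together, the hibernation flow reads as follows (a sketch of the expected call order, assuming no errors, based on the comments above):

	/*
	 * hibernate:   freeze() -> freeze_late() -> [image created] ->
	 *              thaw_early() -> thaw() -> [image written, power off]
	 * next boot:   [image loaded] -> restore_early() -> restore()
	 *
	 * freeze/thaw/restore all reuse the suspend/resume paths; the one
	 * addition is i915_gem_freeze_late(), which lets GEM prepare its
	 * objects before the hibernation image is created.
	 */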
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5faacc6e548d..0113207967d9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -66,7 +66,7 @@
66 66
67#define DRIVER_NAME "i915" 67#define DRIVER_NAME "i915"
68#define DRIVER_DESC "Intel Graphics" 68#define DRIVER_DESC "Intel Graphics"
69#define DRIVER_DATE "20160425" 69#define DRIVER_DATE "20160606"
70 70
71#undef WARN_ON 71#undef WARN_ON
72/* Many gcc seem to not see through this and fall over :( */ 72/* Many gcc seem to not see through this and fall over :( */
@@ -324,6 +324,12 @@ struct i915_hotplug {
324 &dev->mode_config.plane_list, \ 324 &dev->mode_config.plane_list, \
325 base.head) 325 base.head)
326 326
327#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
328 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \
329 base.head) \
330 for_each_if ((plane_mask) & \
331 (1 << drm_plane_index(&intel_plane->base)))
332
327#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ 333#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
328 list_for_each_entry(intel_plane, \ 334 list_for_each_entry(intel_plane, \
329 &(dev)->mode_config.plane_list, \ 335 &(dev)->mode_config.plane_list, \
@@ -333,6 +339,10 @@ struct i915_hotplug {
333#define for_each_intel_crtc(dev, intel_crtc) \ 339#define for_each_intel_crtc(dev, intel_crtc) \
334 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) 340 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
335 341
342#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
343 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
344 for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
345
336#define for_each_intel_encoder(dev, intel_encoder) \ 346#define for_each_intel_encoder(dev, intel_encoder) \
337 list_for_each_entry(intel_encoder, \ 347 list_for_each_entry(intel_encoder, \
338 &(dev)->mode_config.encoder_list, \ 348 &(dev)->mode_config.encoder_list, \
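The mask variants walk the full list and rely on for_each_if to skip entries whose index bit is clear. A minimal usage sketch (crtc_mask is whatever bitmask of drm_crtc_index() bits the caller holds, e.g. from atomic state):

	struct intel_crtc *crtc;

	for_each_intel_crtc_mask(dev, crtc, crtc_mask)
		DRM_DEBUG_KMS("crtc %d is in the mask\n",
			      drm_crtc_index(&crtc->base));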
@@ -588,6 +598,7 @@ struct drm_i915_display_funcs {
588 struct intel_crtc_state *newstate); 598 struct intel_crtc_state *newstate);
589 void (*initial_watermarks)(struct intel_crtc_state *cstate); 599 void (*initial_watermarks)(struct intel_crtc_state *cstate);
590 void (*optimize_watermarks)(struct intel_crtc_state *cstate); 600 void (*optimize_watermarks)(struct intel_crtc_state *cstate);
601 int (*compute_global_watermarks)(struct drm_atomic_state *state);
591 void (*update_wm)(struct drm_crtc *crtc); 602 void (*update_wm)(struct drm_crtc *crtc);
592 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 603 int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
593 void (*modeset_commit_cdclk)(struct drm_atomic_state *state); 604 void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -612,7 +623,7 @@ struct drm_i915_display_funcs {
612 struct drm_i915_gem_object *obj, 623 struct drm_i915_gem_object *obj,
613 struct drm_i915_gem_request *req, 624 struct drm_i915_gem_request *req,
614 uint32_t flags); 625 uint32_t flags);
615 void (*hpd_irq_setup)(struct drm_device *dev); 626 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
616 /* clock updates for mode set */ 627 /* clock updates for mode set */
617 /* cursor updates */ 628 /* cursor updates */
618 /* render clock increase/decrease */ 629 /* render clock increase/decrease */
@@ -735,6 +746,7 @@ struct intel_csr {
735 func(is_valleyview) sep \ 746 func(is_valleyview) sep \
736 func(is_cherryview) sep \ 747 func(is_cherryview) sep \
737 func(is_haswell) sep \ 748 func(is_haswell) sep \
749 func(is_broadwell) sep \
738 func(is_skylake) sep \ 750 func(is_skylake) sep \
739 func(is_broxton) sep \ 751 func(is_broxton) sep \
740 func(is_kabylake) sep \ 752 func(is_kabylake) sep \
@@ -757,9 +769,10 @@ struct intel_csr {
757struct intel_device_info { 769struct intel_device_info {
758 u32 display_mmio_offset; 770 u32 display_mmio_offset;
759 u16 device_id; 771 u16 device_id;
760 u8 num_pipes:3; 772 u8 num_pipes;
761 u8 num_sprites[I915_MAX_PIPES]; 773 u8 num_sprites[I915_MAX_PIPES];
762 u8 gen; 774 u8 gen;
775 u16 gen_mask;
763 u8 ring_mask; /* Rings supported by the HW */ 776 u8 ring_mask; /* Rings supported by the HW */
764 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 777 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
765 /* Register offsets for the various display pipes and transcoders */ 778 /* Register offsets for the various display pipes and transcoders */
@@ -821,9 +834,8 @@ struct i915_ctx_hang_stats {
821/* This must match up with the value previously used for execbuf2.rsvd1. */ 834/* This must match up with the value previously used for execbuf2.rsvd1. */
822#define DEFAULT_CONTEXT_HANDLE 0 835#define DEFAULT_CONTEXT_HANDLE 0
823 836
824#define CONTEXT_NO_ZEROMAP (1<<0)
825/** 837/**
826 * struct intel_context - as the name implies, represents a context. 838 * struct i915_gem_context - as the name implies, represents a context.
827 * @ref: reference count. 839 * @ref: reference count.
828 * @user_handle: userspace tracking identity for this context. 840 * @user_handle: userspace tracking identity for this context.
829 * @remap_slice: l3 row remapping information. 841 * @remap_slice: l3 row remapping information.
@@ -841,33 +853,33 @@ struct i915_ctx_hang_stats {
841 * Contexts are memory images used by the hardware to store copies of their 853 * Contexts are memory images used by the hardware to store copies of their
842 * internal state. 854 * internal state.
843 */ 855 */
844struct intel_context { 856struct i915_gem_context {
845 struct kref ref; 857 struct kref ref;
846 int user_handle;
847 uint8_t remap_slice;
848 struct drm_i915_private *i915; 858 struct drm_i915_private *i915;
849 int flags;
850 struct drm_i915_file_private *file_priv; 859 struct drm_i915_file_private *file_priv;
851 struct i915_ctx_hang_stats hang_stats;
852 struct i915_hw_ppgtt *ppgtt; 860 struct i915_hw_ppgtt *ppgtt;
853 861
854 /* Legacy ring buffer submission */ 862 struct i915_ctx_hang_stats hang_stats;
855 struct {
856 struct drm_i915_gem_object *rcs_state;
857 bool initialized;
858 } legacy_hw_ctx;
859 863
860 /* Execlists */ 864 /* Unique identifier for this context, used by the hw for tracking */
861 struct { 865 unsigned long flags;
866 unsigned hw_id;
867 u32 user_handle;
868#define CONTEXT_NO_ZEROMAP (1<<0)
869
870 struct intel_context {
862 struct drm_i915_gem_object *state; 871 struct drm_i915_gem_object *state;
863 struct intel_ringbuffer *ringbuf; 872 struct intel_ringbuffer *ringbuf;
864 int pin_count;
865 struct i915_vma *lrc_vma; 873 struct i915_vma *lrc_vma;
866 u64 lrc_desc;
867 uint32_t *lrc_reg_state; 874 uint32_t *lrc_reg_state;
875 u64 lrc_desc;
876 int pin_count;
877 bool initialised;
868 } engine[I915_NUM_ENGINES]; 878 } engine[I915_NUM_ENGINES];
869 879
870 struct list_head link; 880 struct list_head link;
881
882 u8 remap_slice;
871}; 883};
872 884
873enum fb_op_origin { 885enum fb_op_origin {
@@ -1115,6 +1127,8 @@ struct intel_gen6_power_mgmt {
1115 bool interrupts_enabled; 1127 bool interrupts_enabled;
1116 u32 pm_iir; 1128 u32 pm_iir;
1117 1129
1130 u32 pm_intr_keep;
1131
1118 /* Frequencies are stored in potentially platform dependent multiples. 1132 /* Frequencies are stored in potentially platform dependent multiples.
1119 * In other words, *_freq needs to be multiplied by X to be interesting. 1133 * In other words, *_freq needs to be multiplied by X to be interesting.
1120 * Soft limits are those which are used for the dynamic reclocking done 1134 * Soft limits are those which are used for the dynamic reclocking done
@@ -1488,6 +1502,7 @@ struct intel_vbt_data {
1488 bool present; 1502 bool present;
1489 bool active_low_pwm; 1503 bool active_low_pwm;
1490 u8 min_brightness; /* min_brightness/255 of max */ 1504 u8 min_brightness; /* min_brightness/255 of max */
1505 enum intel_backlight_type type;
1491 } backlight; 1506 } backlight;
1492 1507
1493 /* MIPI DSI */ 1508 /* MIPI DSI */
@@ -1580,7 +1595,7 @@ struct skl_ddb_allocation {
1580}; 1595};
1581 1596
1582struct skl_wm_values { 1597struct skl_wm_values {
1583 bool dirty[I915_MAX_PIPES]; 1598 unsigned dirty_pipes;
1584 struct skl_ddb_allocation ddb; 1599 struct skl_ddb_allocation ddb;
1585 uint32_t wm_linetime[I915_MAX_PIPES]; 1600 uint32_t wm_linetime[I915_MAX_PIPES];
1586 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; 1601 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
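Switching from a bool-per-pipe array to a bitmask makes marking and testing a pipe a single OR/AND against drm_crtc_mask(), i.e. 1 << drm_crtc_index(). A sketch, not from this diff, with a hypothetical call site:

	results->dirty_pipes |= drm_crtc_mask(&intel_crtc->base);

	if (results->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
		skl_write_wm_values(dev_priv, results);	/* hypothetical call site */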
@@ -1697,7 +1712,7 @@ struct i915_execbuffer_params {
1697 uint64_t batch_obj_vm_offset; 1712 uint64_t batch_obj_vm_offset;
1698 struct intel_engine_cs *engine; 1713 struct intel_engine_cs *engine;
1699 struct drm_i915_gem_object *batch_obj; 1714 struct drm_i915_gem_object *batch_obj;
1700 struct intel_context *ctx; 1715 struct i915_gem_context *ctx;
1701 struct drm_i915_gem_request *request; 1716 struct drm_i915_gem_request *request;
1702}; 1717};
1703 1718
@@ -1747,6 +1762,7 @@ struct drm_i915_private {
1747 wait_queue_head_t gmbus_wait_queue; 1762 wait_queue_head_t gmbus_wait_queue;
1748 1763
1749 struct pci_dev *bridge_dev; 1764 struct pci_dev *bridge_dev;
1765 struct i915_gem_context *kernel_context;
1750 struct intel_engine_cs engine[I915_NUM_ENGINES]; 1766 struct intel_engine_cs engine[I915_NUM_ENGINES];
1751 struct drm_i915_gem_object *semaphore_obj; 1767 struct drm_i915_gem_object *semaphore_obj;
1752 uint32_t last_seqno, next_seqno; 1768 uint32_t last_seqno, next_seqno;
@@ -1802,13 +1818,17 @@ struct drm_i915_private {
1802 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1818 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1803 1819
1804 unsigned int fsb_freq, mem_freq, is_ddr3; 1820 unsigned int fsb_freq, mem_freq, is_ddr3;
1805 unsigned int skl_boot_cdclk; 1821 unsigned int skl_preferred_vco_freq;
1806 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; 1822 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
1807 unsigned int max_dotclk_freq; 1823 unsigned int max_dotclk_freq;
1808 unsigned int rawclk_freq; 1824 unsigned int rawclk_freq;
1809 unsigned int hpll_freq; 1825 unsigned int hpll_freq;
1810 unsigned int czclk_freq; 1826 unsigned int czclk_freq;
1811 1827
1828 struct {
1829 unsigned int vco, ref;
1830 } cdclk_pll;
1831
1812 /** 1832 /**
1813 * wq - Driver workqueue for GEM. 1833 * wq - Driver workqueue for GEM.
1814 * 1834 *
@@ -1838,6 +1858,13 @@ struct drm_i915_private {
1838 DECLARE_HASHTABLE(mm_structs, 7); 1858 DECLARE_HASHTABLE(mm_structs, 7);
1839 struct mutex mm_lock; 1859 struct mutex mm_lock;
1840 1860
1861 /* The hw wants to have a stable context identifier for the lifetime
1862 * of the context (for OA, PASID, faults, etc). This is limited
1863 * in execlists to 21 bits.
1864 */
1865 struct ida context_hw_ida;
1866#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
1867
1841 /* Kernel Modesetting */ 1868 /* Kernel Modesetting */
1842 1869
1843 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1870 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
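Presumably the allocator side pairs with this ida roughly as follows (a sketch, not part of this hunk; the real allocation lives in i915_gem_context.c):

	/* allocate a stable hw_id, failing once all 2^21 ids are in use */
	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0)
		return ret;
	ctx->hw_id = ret;

	/* ...and on context free: */
	ida_simple_remove(&dev_priv->context_hw_ida, ctx->hw_id);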
@@ -1950,9 +1977,6 @@ struct drm_i915_private {
1950 */ 1977 */
1951 uint16_t skl_latency[8]; 1978 uint16_t skl_latency[8];
1952 1979
1953 /* Committed wm config */
1954 struct intel_wm_config config;
1955
1956 /* 1980 /*
1957 * The skl_wm_values structure is a bit too big for stack 1981 * The skl_wm_values structure is a bit too big for stack
1958 * allocation, so we keep the staging struct where we store 1982 * allocation, so we keep the staging struct where we store
@@ -1975,6 +1999,13 @@ struct drm_i915_private {
1975 * cstate->wm.need_postvbl_update. 1999 * cstate->wm.need_postvbl_update.
1976 */ 2000 */
1977 struct mutex wm_mutex; 2001 struct mutex wm_mutex;
2002
2003 /*
2004 * Set during HW readout of watermarks/DDB. Some platforms
2005 * need to know when we're still using BIOS-provided values
2006 * (which we don't fully trust).
2007 */
2008 bool distrust_bios_wm;
1978 } wm; 2009 } wm;
1979 2010
1980 struct i915_runtime_pm pm; 2011 struct i915_runtime_pm pm;
@@ -1989,8 +2020,6 @@ struct drm_i915_private {
1989 void (*stop_engine)(struct intel_engine_cs *engine); 2020 void (*stop_engine)(struct intel_engine_cs *engine);
1990 } gt; 2021 } gt;
1991 2022
1992 struct intel_context *kernel_context;
1993
1994 /* perform PHY state sanity checks? */ 2023 /* perform PHY state sanity checks? */
1995 bool chv_phy_assert[2]; 2024 bool chv_phy_assert[2];
1996 2025
@@ -2227,9 +2256,75 @@ struct drm_i915_gem_object {
2227}; 2256};
2228#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2257#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
2229 2258
2230void i915_gem_track_fb(struct drm_i915_gem_object *old, 2259/*
2231 struct drm_i915_gem_object *new, 2260 * Optimised SGL iterator for GEM objects
2232 unsigned frontbuffer_bits); 2261 */
2262static __always_inline struct sgt_iter {
2263 struct scatterlist *sgp;
2264 union {
2265 unsigned long pfn;
2266 dma_addr_t dma;
2267 };
2268 unsigned int curr;
2269 unsigned int max;
2270} __sgt_iter(struct scatterlist *sgl, bool dma) {
2271 struct sgt_iter s = { .sgp = sgl };
2272
2273 if (s.sgp) {
2274 s.max = s.curr = s.sgp->offset;
2275 s.max += s.sgp->length;
2276 if (dma)
2277 s.dma = sg_dma_address(s.sgp);
2278 else
2279 s.pfn = page_to_pfn(sg_page(s.sgp));
2280 }
2281
2282 return s;
2283}
2284
2285/**
2286 * __sg_next - return the next scatterlist entry in a list
2287 * @sg: The current sg entry
2288 *
2289 * Description:
2290 * If the entry is the last, return NULL; otherwise, step to the next
2291 * element in the array (@sg@+1). If that's a chain pointer, follow it;
2292 * otherwise just return the pointer to the current element.
2293 **/
2294static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2295{
2296#ifdef CONFIG_DEBUG_SG
2297 BUG_ON(sg->sg_magic != SG_MAGIC);
2298#endif
2299 return sg_is_last(sg) ? NULL :
2300 likely(!sg_is_chain(++sg)) ? sg :
2301 sg_chain_ptr(sg);
2302}
2303
2304/**
2305 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2306 * @__dmap: DMA address (output)
2307 * @__iter: 'struct sgt_iter' (iterator state, internal)
2308 * @__sgt: sg_table to iterate over (input)
2309 */
2310#define for_each_sgt_dma(__dmap, __iter, __sgt) \
2311 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
2312 ((__dmap) = (__iter).dma + (__iter).curr); \
2313 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
2314 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
2315
2316/**
2317 * for_each_sgt_page - iterate over the pages of the given sg_table
2318 * @__pp: page pointer (output)
2319 * @__iter: 'struct sgt_iter' (iterator state, internal)
2320 * @__sgt: sg_table to iterate over (input)
2321 */
2322#define for_each_sgt_page(__pp, __iter, __sgt) \
2323 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
2324 ((__pp) = (__iter).pfn == 0 ? NULL : \
2325 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2326 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
2327 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
2233 2328
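Typical use of the new iterators, assuming obj->pages is the object's populated sg_table (a sketch):

	struct sgt_iter sgt_iter;
	struct page *page;
	dma_addr_t dma;

	for_each_sgt_page(page, sgt_iter, obj->pages)
		set_page_dirty(page);

	for_each_sgt_dma(dma, sgt_iter, obj->pages)
		DRM_DEBUG("page at dma %pad\n", &dma);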
2234/** 2329/**
2235 * Request queue structure. 2330 * Request queue structure.
@@ -2278,6 +2373,9 @@ struct drm_i915_gem_request {
2278 /** Position in the ringbuffer of the end of the whole request */ 2373 /** Position in the ringbuffer of the end of the whole request */
2279 u32 tail; 2374 u32 tail;
2280 2375
2376 /** Preallocate space in the ringbuffer for emitting the request */
2377 u32 reserved_space;
2378
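As I read it, the field works like this (a sketch; MIN_SPACE_FOR_ADD_REQUEST is the ringbuffer code's estimate of the space the closing commands need):

	/* at request allocation: keep room for the final flush + breadcrumb */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	/* at __i915_add_request(): release the reservation, then emit them */
	req->reserved_space = 0;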
2281 /** 2379 /**
2282 * Context and ring buffer related to this request 2380 * Context and ring buffer related to this request
2283 * Contexts are refcounted, so when this request is associated with a 2381 * Contexts are refcounted, so when this request is associated with a
@@ -2288,9 +2386,20 @@ struct drm_i915_gem_request {
2288 * i915_gem_request_free() will then decrement the refcount on the 2386 * i915_gem_request_free() will then decrement the refcount on the
2289 * context. 2387 * context.
2290 */ 2388 */
2291 struct intel_context *ctx; 2389 struct i915_gem_context *ctx;
2292 struct intel_ringbuffer *ringbuf; 2390 struct intel_ringbuffer *ringbuf;
2293 2391
2392 /**
2393 * Context related to the previous request.
2394 * As the contexts are accessed by the hardware until the switch is
2395 * completed to a new context, the hardware may still be writing
2396 * to the context object after the breadcrumb is visible. We must
2397 * not unpin/unbind/prune that object whilst still active and so
2398 * we keep the previous context pinned until the following (this)
2399 * request is retired.
2400 */
2401 struct i915_gem_context *previous_context;
2402
2294 /** Batch buffer related to this request if any (used for 2403 /** Batch buffer related to this request if any (used for
2295 error state dump only) */ 2404 error state dump only) */
2296 struct drm_i915_gem_object *batch_obj; 2405 struct drm_i915_gem_object *batch_obj;
@@ -2327,11 +2436,13 @@ struct drm_i915_gem_request {
2327 /** Execlists no. of times this request has been sent to the ELSP */ 2436 /** Execlists no. of times this request has been sent to the ELSP */
2328 int elsp_submitted; 2437 int elsp_submitted;
2329 2438
2439 /** Execlists context hardware id. */
2440 unsigned ctx_hw_id;
2330}; 2441};
2331 2442
2332struct drm_i915_gem_request * __must_check 2443struct drm_i915_gem_request * __must_check
2333i915_gem_request_alloc(struct intel_engine_cs *engine, 2444i915_gem_request_alloc(struct intel_engine_cs *engine,
2334 struct intel_context *ctx); 2445 struct i915_gem_context *ctx);
2335void i915_gem_request_free(struct kref *req_ref); 2446void i915_gem_request_free(struct kref *req_ref);
2336int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, 2447int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
2337 struct drm_file *file); 2448 struct drm_file *file);
@@ -2359,23 +2470,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
2359static inline void 2470static inline void
2360i915_gem_request_unreference(struct drm_i915_gem_request *req) 2471i915_gem_request_unreference(struct drm_i915_gem_request *req)
2361{ 2472{
2362 WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
2363 kref_put(&req->ref, i915_gem_request_free); 2473 kref_put(&req->ref, i915_gem_request_free);
2364} 2474}
2365 2475
2366static inline void
2367i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
2368{
2369 struct drm_device *dev;
2370
2371 if (!req)
2372 return;
2373
2374 dev = req->engine->dev;
2375 if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
2376 mutex_unlock(&dev->struct_mutex);
2377}
2378
2379static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, 2476static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
2380 struct drm_i915_gem_request *src) 2477 struct drm_i915_gem_request *src)
2381{ 2478{
@@ -2503,9 +2600,29 @@ struct drm_i915_cmd_table {
2503#define INTEL_INFO(p) (&__I915__(p)->info) 2600#define INTEL_INFO(p) (&__I915__(p)->info)
2504#define INTEL_GEN(p) (INTEL_INFO(p)->gen) 2601#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
2505#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2602#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
2506#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
2507 2603
2508#define REVID_FOREVER 0xff 2604#define REVID_FOREVER 0xff
2605#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
2606
2607#define GEN_FOREVER (0)
2608/*
2609 * Returns true if Gen is in inclusive range [Start, End].
2610 *
2611 * Use GEN_FOREVER for unbound start and/or end.
2612 */
2613#define IS_GEN(p, s, e) ({ \
2614 unsigned int __s = (s), __e = (e); \
2615 BUILD_BUG_ON(!__builtin_constant_p(s)); \
2616 BUILD_BUG_ON(!__builtin_constant_p(e)); \
2617 if ((__s) != GEN_FOREVER) \
2618 __s = (s) - 1; \
2619 if ((__e) == GEN_FOREVER) \
2620 __e = BITS_PER_LONG - 1; \
2621 else \
2622 __e = (e) - 1; \
2623 !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
2624})
2625
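Both bounds must be compile-time constants (the BUILD_BUG_ONs enforce this), and GEN_FOREVER leaves that end of the range open. For example (a sketch):

	IS_GEN(dev_priv, 6, 7)             /* true on gen6 and gen7 only */
	IS_GEN(dev_priv, 8, GEN_FOREVER)   /* true on gen8 and everything newer */
	IS_GEN(dev_priv, GEN_FOREVER, 5)   /* true on gen5 and everything older */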
2509/* 2626/*
2510 * Return true if revision is in range [since,until] inclusive. 2627 * Return true if revision is in range [since,until] inclusive.
2511 * 2628 *
@@ -2538,7 +2655,7 @@ struct drm_i915_cmd_table {
2538#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2655#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
2539#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) 2656#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
2540#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2657#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
2541#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) 2658#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
2542#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2659#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
2543#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) 2660#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
2544#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) 2661#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
@@ -2606,14 +2723,14 @@ struct drm_i915_cmd_table {
2606 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2723 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2607 * chips, etc.). 2724 * chips, etc.).
2608 */ 2725 */
2609#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 2726#define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1))
2610#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 2727#define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2))
2611#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 2728#define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3))
2612#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 2729#define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4))
2613#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2730#define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5))
2614#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2731#define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6))
2615#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2732#define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7))
2616#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) 2733#define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8))
2617 2734
2618#define RENDER_RING (1<<RCS) 2735#define RENDER_RING (1<<RCS)
2619#define BSD_RING (1<<VCS) 2736#define BSD_RING (1<<VCS)
@@ -2686,12 +2803,18 @@ struct drm_i915_cmd_table {
2686 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ 2803 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
2687 IS_KABYLAKE(dev) || IS_BROXTON(dev)) 2804 IS_KABYLAKE(dev) || IS_BROXTON(dev))
2688#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2805#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
2689#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2806#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
2690 2807
2691#define HAS_CSR(dev) (IS_GEN9(dev)) 2808#define HAS_CSR(dev) (IS_GEN9(dev))
2692 2809
2693#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2810/*
2694#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2811 * For now, anything with a GuC requires uCode loading, and then supports
2812 * command submission once loaded. But these are logically independent
2813 * properties, so we have separate macros to test them.
2814 */
2815#define HAS_GUC(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
2816#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
2817#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
2695 2818
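With HAS_GUC() as the umbrella, callers can keep testing the specific property they need, e.g. (a sketch; the helper names are hypothetical):

	if (HAS_GUC_UCODE(dev))
		load_guc_firmware(dev);		/* hypothetical helper */
	if (HAS_GUC_SCHED(dev) && i915.enable_guc_submission)
		enable_guc_submission(dev);	/* hypothetical helper */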
2696#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ 2819#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
2697 INTEL_INFO(dev)->gen >= 8) 2820 INTEL_INFO(dev)->gen >= 8)
@@ -2740,6 +2863,9 @@ extern int i915_max_ioctl;
2740extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); 2863extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
2741extern int i915_resume_switcheroo(struct drm_device *dev); 2864extern int i915_resume_switcheroo(struct drm_device *dev);
2742 2865
2866int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2867 int enable_ppgtt);
2868
2743/* i915_dma.c */ 2869/* i915_dma.c */
2744void __printf(3, 4) 2870void __printf(3, 4)
2745__i915_printk(struct drm_i915_private *dev_priv, const char *level, 2871__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -2760,9 +2886,9 @@ extern void i915_driver_postclose(struct drm_device *dev,
2760extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2886extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2761 unsigned long arg); 2887 unsigned long arg);
2762#endif 2888#endif
2763extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); 2889extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2764extern bool intel_has_gpu_reset(struct drm_device *dev); 2890extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
2765extern int i915_reset(struct drm_device *dev); 2891extern int i915_reset(struct drm_i915_private *dev_priv);
2766extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2892extern int intel_guc_reset(struct drm_i915_private *dev_priv);
2767extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2893extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
2768extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2894extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2772,30 +2898,33 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2772int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2898int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2773 2899
2774/* intel_hotplug.c */ 2900/* intel_hotplug.c */
2775void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); 2901void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
2902 u32 pin_mask, u32 long_mask);
2776void intel_hpd_init(struct drm_i915_private *dev_priv); 2903void intel_hpd_init(struct drm_i915_private *dev_priv);
2777void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2904void intel_hpd_init_work(struct drm_i915_private *dev_priv);
2778void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2905void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2779bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2906bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
2780 2907
2781/* i915_irq.c */ 2908/* i915_irq.c */
2782void i915_queue_hangcheck(struct drm_device *dev); 2909void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
2783__printf(3, 4) 2910__printf(3, 4)
2784void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2911void i915_handle_error(struct drm_i915_private *dev_priv,
2912 u32 engine_mask,
2785 const char *fmt, ...); 2913 const char *fmt, ...);
2786 2914
2787extern void intel_irq_init(struct drm_i915_private *dev_priv); 2915extern void intel_irq_init(struct drm_i915_private *dev_priv);
2788int intel_irq_install(struct drm_i915_private *dev_priv); 2916int intel_irq_install(struct drm_i915_private *dev_priv);
2789void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2917void intel_irq_uninstall(struct drm_i915_private *dev_priv);
2790 2918
2791extern void intel_uncore_sanitize(struct drm_device *dev); 2919extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
2792extern void intel_uncore_early_sanitize(struct drm_device *dev, 2920extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
2793 bool restore_forcewake); 2921 bool restore_forcewake);
2794extern void intel_uncore_init(struct drm_device *dev); 2922extern void intel_uncore_init(struct drm_i915_private *dev_priv);
2795extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2923extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
2796extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 2924extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
2797extern void intel_uncore_fini(struct drm_device *dev); 2925extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
2798extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 2926extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
2927 bool restore);
2799const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2928const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
2800void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2929void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
2801 enum forcewake_domains domains); 2930 enum forcewake_domains domains);
@@ -2811,9 +2940,9 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
2811u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 2940u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
2812 2941
2813void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2942void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
2814static inline bool intel_vgpu_active(struct drm_device *dev) 2943static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
2815{ 2944{
2816 return to_i915(dev)->vgpu.active; 2945 return dev_priv->vgpu.active;
2817} 2946}
2818 2947
2819void 2948void
@@ -2909,7 +3038,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
2909 struct drm_file *file_priv); 3038 struct drm_file *file_priv);
2910int i915_gem_get_tiling(struct drm_device *dev, void *data, 3039int i915_gem_get_tiling(struct drm_device *dev, void *data,
2911 struct drm_file *file_priv); 3040 struct drm_file *file_priv);
2912int i915_gem_init_userptr(struct drm_device *dev); 3041void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
2913int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 3042int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
2914 struct drm_file *file); 3043 struct drm_file *file);
2915int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 3044int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -2919,11 +3048,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2919void i915_gem_load_init(struct drm_device *dev); 3048void i915_gem_load_init(struct drm_device *dev);
2920void i915_gem_load_cleanup(struct drm_device *dev); 3049void i915_gem_load_cleanup(struct drm_device *dev);
2921void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 3050void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
3051int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3052
2922void *i915_gem_object_alloc(struct drm_device *dev); 3053void *i915_gem_object_alloc(struct drm_device *dev);
2923void i915_gem_object_free(struct drm_i915_gem_object *obj); 3054void i915_gem_object_free(struct drm_i915_gem_object *obj);
2924void i915_gem_object_init(struct drm_i915_gem_object *obj, 3055void i915_gem_object_init(struct drm_i915_gem_object *obj,
2925 const struct drm_i915_gem_object_ops *ops); 3056 const struct drm_i915_gem_object_ops *ops);
2926struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 3057struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
2927 size_t size); 3058 size_t size);
2928struct drm_i915_gem_object *i915_gem_object_create_from_data( 3059struct drm_i915_gem_object *i915_gem_object_create_from_data(
2929 struct drm_device *dev, const void *data, size_t size); 3060 struct drm_device *dev, const void *data, size_t size);
@@ -3054,6 +3185,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
3054 struct drm_mode_create_dumb *args); 3185 struct drm_mode_create_dumb *args);
3055int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3186int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3056 uint32_t handle, uint64_t *offset); 3187 uint32_t handle, uint64_t *offset);
3188
3189void i915_gem_track_fb(struct drm_i915_gem_object *old,
3190 struct drm_i915_gem_object *new,
3191 unsigned frontbuffer_bits);
3192
3057/** 3193/**
3058 * Returns true if seq1 is later than seq2. 3194 * Returns true if seq1 is later than seq2.
3059 */ 3195 */
@@ -3081,13 +3217,13 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
                                 req->seqno);
 }
 
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
-bool i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3147,7 +3283,6 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
@@ -3215,8 +3350,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
                        struct i915_address_space *vm);
 
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-                               struct i915_address_space *vm);
 struct i915_vma *
 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                    struct i915_address_space *vm);
@@ -3251,14 +3384,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
        return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
 }
 
-static inline unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-       return i915_gem_obj_size(obj, &ggtt->base);
-}
+unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
@@ -3272,12 +3399,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
                                   alignment, flags | PIN_GLOBAL);
 }
 
-static inline int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
-{
-       return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
-}
-
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
                                     const struct i915_ggtt_view *view);
 static inline void
@@ -3301,28 +3422,42 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
 struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
-static inline void i915_gem_context_reference(struct intel_context *ctx)
+
+static inline struct i915_gem_context *
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+       struct i915_gem_context *ctx;
+
+       lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex);
+
+       ctx = idr_find(&file_priv->context_idr, id);
+       if (!ctx)
+               return ERR_PTR(-ENOENT);
+
+       return ctx;
+}
+
+static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
 {
        kref_get(&ctx->ref);
 }
 
-static inline void i915_gem_context_unreference(struct intel_context *ctx)
+static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
 {
+       lockdep_assert_held(&ctx->i915->dev->struct_mutex);
        kref_put(&ctx->ref, i915_gem_context_free);
 }
 
-static inline bool i915_gem_context_is_default(const struct intel_context *c)
+static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
 {
        return c->user_handle == DEFAULT_CONTEXT_HANDLE;
 }
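Note that i915_gem_context_lookup() never returns NULL: a missing id becomes ERR_PTR(-ENOENT), so callers are expected to test with IS_ERR(). A hedged sketch of the call pattern (illustrative; args->ctx_id stands in for whatever the ioctl carries):

    struct i915_gem_context *ctx;

    ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
    if (IS_ERR(ctx))
            return PTR_ERR(ctx);            /* -ENOENT for a stale handle */

    i915_gem_context_reference(ctx);        /* struct_mutex held, per the lockdep assert */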
@@ -3335,6 +3470,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
+                                      struct drm_file *file);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -3349,9 +3486,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
 /* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                intel_gtt_chipset_flush();
 }
 
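Most hunks in this header follow the same mechanical conversion: helpers that only need driver-private state take struct drm_i915_private directly, and call sites convert once at the boundary with to_i915(). A minimal sketch of the before/after shape (generic names, not from the patch):

    /* before: every helper re-derives the private pointer */
    void helper(struct drm_device *dev)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;
            /* ... */
    }

    /* after: the caller converts once */
    void helper(struct drm_i915_private *dev_priv);
    /* ...   helper(to_i915(dev));   ... */

Besides dropping a pointer chase per call, this lets macros such as INTEL_GEN(dev_priv) read the cached device info without going back through drm_device.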
@@ -3430,18 +3567,19 @@ static inline void i915_error_state_buf_release(
 {
        kfree(eb->buf);
 }
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+                             u32 engine_mask,
                              const char *error_msg);
 void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv);
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);
 
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
-int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
 void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
 bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@@ -3489,31 +3627,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
 
 /* intel_opregion.c */
 #ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_device *dev);
-extern void intel_opregion_init(struct drm_device *dev);
-extern void intel_opregion_fini(struct drm_device *dev);
-extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
+extern void intel_opregion_register(struct drm_i915_private *dev_priv);
+extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                                         bool enable);
-extern int intel_opregion_notify_adapter(struct drm_device *dev,
+extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
                                         pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_device *dev);
+extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
 #else
-static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
-static inline void intel_opregion_init(struct drm_device *dev) { return; }
-static inline void intel_opregion_fini(struct drm_device *dev) { return; }
-static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
+static inline void intel_opregion_init(struct drm_i915_private *dev) { }
+static inline void intel_opregion_fini(struct drm_i915_private *dev) { }
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
 static inline int
 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
 {
        return 0;
 }
 static inline int
-intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
 {
        return 0;
 }
-static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
 {
        return -ENODEV;
 }
@@ -3538,26 +3678,25 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_display_resume(struct drm_device *dev);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void intel_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_enable_rc6(const struct drm_device *dev);
 
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
-int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
-                              struct drm_file *file);
 
 /* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_overlay_error_state *error);
 
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
@@ -3586,6 +3725,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
+/* intel_dpio_phy.c */
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 deemph_reg_value, u32 margin_reg_value,
+                             bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+                             bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 demph_reg_value, u32 preemph_reg_value,
+                             u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aad26851cee3..343d88114f3b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                vaddr += PAGE_SIZE;
        }
 
-       i915_gem_chipset_flush(obj->base.dev);
+       i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
@@ -347,7 +347,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        }
 
        drm_clflush_virt_range(vaddr, args->size);
-       i915_gem_chipset_flush(dev);
+       i915_gem_chipset_flush(to_i915(dev));
 
 out:
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -381,9 +381,9 @@ i915_gem_create(struct drm_file *file,
                return -EINVAL;
 
        /* Allocate the new object */
-       obj = i915_gem_alloc_object(dev, size);
-       if (obj == NULL)
-               return -ENOMEM;
+       obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
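With i915_gem_object_create() the failure mode moves from a NULL return into the pointer value itself, so every caller switches from a NULL check to IS_ERR()/PTR_ERR(). A minimal sketch of the idiom from <linux/err.h> (generic; make_obj() is hypothetical):

    obj = make_obj();
    if (IS_ERR(obj))
            return PTR_ERR(obj);    /* pointer encodes -ENOMEM etc. */

ERR_PTR() stores the negative errno in the top, never-mappable page of the address space, which is why a single pointer can carry either a valid object or a precise error code.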
@@ -1006,7 +1006,7 @@ out:
        }
 
        if (needs_clflush_after)
-               i915_gem_chipset_flush(dev);
+               i915_gem_chipset_flush(to_i915(dev));
        else
                obj->cache_dirty = true;
 
@@ -1230,8 +1230,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        struct intel_rps_client *rps)
 {
        struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = req->i915;
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1413,6 +1412,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
        list_del_init(&request->list);
        i915_gem_request_remove_from_client(request);
 
+       if (request->previous_context) {
+               if (i915.enable_execlists)
+                       intel_lr_context_unpin(request->previous_context,
+                                              request->engine);
+       }
+
+       i915_gem_context_unreference(request->ctx);
        i915_gem_request_unreference(request);
 }
 
@@ -1422,7 +1428,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;
 
-       lockdep_assert_held(&engine->dev->struct_mutex);
+       lockdep_assert_held(&engine->i915->dev->struct_mutex);
 
        if (list_empty(&req->list))
                return;
@@ -1982,7 +1988,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
                return size;
 
        /* Previous chips need a power-of-two fence region when tiling */
-       if (INTEL_INFO(dev)->gen == 3)
+       if (IS_GEN3(dev))
                gtt_size = 1024*1024;
        else
                gtt_size = 512*1024;
@@ -2162,7 +2168,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       struct page *page;
        int ret;
 
        BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2191,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;
 
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
-
+       for_each_sgt_page(page, sgt_iter, obj->pages) {
                if (obj->dirty)
                        set_page_dirty(page);
 
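for_each_sgt_page() folds the sg_page_iter bookkeeping into the iterator and yields struct page pointers directly, which is what turns the two-statement loop body above into one line. Roughly, the usage shapes compare like this (sketch of call sites, not the macro's definition; use() is hypothetical):

    /* old */
    for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
            use(sg_page_iter_page(&sg_iter));

    /* new: iterator owns the dereference */
    for_each_sgt_page(page, sgt_iter, st)
            use(page);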
@@ -2243,7 +2248,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        int ret;
@@ -2340,8 +2345,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
 err_pages:
        sg_mark_end(sg);
-       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-               put_page(sg_page_iter_page(&sg_iter));
+       for_each_sgt_page(page, sgt_iter, st)
+               put_page(page);
        sg_free_table(st);
        kfree(st);
 
@@ -2395,6 +2400,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+       unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+       struct sg_table *sgt = obj->pages;
+       struct sgt_iter sgt_iter;
+       struct page *page;
+       struct page *stack_pages[32];
+       struct page **pages = stack_pages;
+       unsigned long i = 0;
+       void *addr;
+
+       /* A single page can always be kmapped */
+       if (n_pages == 1)
+               return kmap(sg_page(sgt->sgl));
+
+       if (n_pages > ARRAY_SIZE(stack_pages)) {
+               /* Too big for stack -- allocate temporary array instead */
+               pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+               if (!pages)
+                       return NULL;
+       }
+
+       for_each_sgt_page(page, sgt_iter, sgt)
+               pages[i++] = page;
+
+       /* Check that we have the expected number of pages */
+       GEM_BUG_ON(i != n_pages);
+
+       addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+       if (pages != stack_pages)
+               drm_free_large(pages);
+
+       return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 {
        int ret;
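i915_gem_object_map() avoids a heap allocation for objects of up to 32 pages by building the page array on the stack, falls back to drm_malloc_gfp() beyond that, and a single-page object bypasses vmap() entirely via kmap(). A hedged sketch of a caller of the public wrapper (assumed usage; data/size are stand-ins):

    void *ptr;

    ptr = i915_gem_object_pin_map(obj);     /* struct_mutex held */
    if (IS_ERR(ptr))
            return PTR_ERR(ptr);

    memcpy(ptr, data, size);                /* linear CPU view of the object */
    i915_gem_object_unpin_map(obj);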
@@ -2407,29 +2450,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 
        i915_gem_object_pin_pages(obj);
 
-       if (obj->mapping == NULL) {
-               struct page **pages;
-
-               pages = NULL;
-               if (obj->base.size == PAGE_SIZE)
-                       obj->mapping = kmap(sg_page(obj->pages->sgl));
-               else
-                       pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
-                                              sizeof(*pages),
-                                              GFP_TEMPORARY);
-               if (pages != NULL) {
-                       struct sg_page_iter sg_iter;
-                       int n;
-
-                       n = 0;
-                       for_each_sg_page(obj->pages->sgl, &sg_iter,
-                                        obj->pages->nents, 0)
-                               pages[n++] = sg_page_iter_page(&sg_iter);
-
-                       obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
-                       drm_free_large(pages);
-               }
-               if (obj->mapping == NULL) {
+       if (!obj->mapping) {
+               obj->mapping = i915_gem_object_map(obj);
+               if (!obj->mapping) {
                        i915_gem_object_unpin_pages(obj);
                        return ERR_PTR(-ENOMEM);
                }
@@ -2502,9 +2525,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 }
 
 static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int ret;
 
@@ -2514,7 +2536,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
                if (ret)
                        return ret;
        }
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev_priv);
 
        /* Finally reset hw state */
        for_each_engine(engine, dev_priv)
@@ -2534,7 +2556,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
        /* HWS page needs to be set less than what we
         * will inject to ring
         */
-       ret = i915_gem_init_seqno(dev, seqno - 1);
+       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;
 
@@ -2550,13 +2572,11 @@
 }
 
 int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        /* reserve 0 for non-seqno */
        if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_init_seqno(dev, 0);
+               int ret = i915_gem_init_seqno(dev_priv, 0);
                if (ret)
                        return ret;
 
@@ -2580,6 +2600,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
+       u32 reserved_tail;
        int ret;
 
        if (WARN_ON(request == NULL))
@@ -2594,9 +2615,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
-       intel_ring_reserved_space_use(ringbuf);
-
        request_start = intel_ring_get_tail(ringbuf);
+       reserved_tail = request->reserved_space;
+       request->reserved_space = 0;
+
        /*
        * Emit any outstanding flushes - execbuf can fail to emit the flush
        * after having emitted the batchbuffer command. Hence we need to fix
@@ -2652,19 +2674,25 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
 
-       i915_queue_hangcheck(engine->dev);
+       i915_queue_hangcheck(engine->i915);
 
        queue_delayed_work(dev_priv->wq,
                           &dev_priv->mm.retire_work,
                           round_jiffies_up_relative(HZ));
-       intel_mark_busy(dev_priv->dev);
+       intel_mark_busy(dev_priv);
 
        /* Sanity check that the reserved size was large enough. */
-       intel_ring_reserved_space_end(ringbuf);
+       ret = intel_ring_get_tail(ringbuf) - request_start;
+       if (ret < 0)
+               ret += ringbuf->size;
+       WARN_ONCE(ret > reserved_tail,
+                 "Not enough space reserved (%d bytes) "
+                 "for adding the request (%d bytes)\n",
+                 reserved_tail, ret);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
-                                  const struct intel_context *ctx)
+                                  const struct i915_gem_context *ctx)
 {
        unsigned long elapsed;
 
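The new sanity check computes the space actually consumed modulo the ring size: the buffer is circular, so when the tail wraps past the end the raw difference goes negative and ringbuf->size is added back. For example, with a 4096-byte ring, request_start == 4000 and a final tail of 112 give 112 - 4000 = -3888, corrected to -3888 + 4096 = 208 bytes consumed, which is then compared against the reservation recorded in reserved_tail.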
@@ -2689,7 +2717,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
 }
 
 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
-                                 struct intel_context *ctx,
+                                 struct i915_gem_context *ctx,
                                  const bool guilty)
 {
        struct i915_ctx_hang_stats *hs;
@@ -2712,27 +2740,15 @@ void i915_gem_request_free(struct kref *req_ref)
 {
        struct drm_i915_gem_request *req = container_of(req_ref,
                                                 typeof(*req), ref);
-       struct intel_context *ctx = req->ctx;
-
-       if (req->file_priv)
-               i915_gem_request_remove_from_client(req);
-
-       if (ctx) {
-               if (i915.enable_execlists && ctx != req->i915->kernel_context)
-                       intel_lr_context_unpin(ctx, req->engine);
-
-               i915_gem_context_unreference(ctx);
-       }
-
        kmem_cache_free(req->i915->requests, req);
 }
 
 static inline int
 __i915_gem_request_alloc(struct intel_engine_cs *engine,
-                        struct intel_context *ctx,
+                        struct i915_gem_context *ctx,
                         struct drm_i915_gem_request **req_out)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
        unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        int ret;
@@ -2754,7 +2770,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        if (req == NULL)
                return -ENOMEM;
 
-       ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+       ret = i915_gem_get_seqno(engine->i915, &req->seqno);
        if (ret)
                goto err;
 
@@ -2765,15 +2781,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        req->ctx = ctx;
        i915_gem_context_reference(req->ctx);
 
-       if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(req);
-       else
-               ret = intel_ring_alloc_request_extras(req);
-       if (ret) {
-               i915_gem_context_unreference(req->ctx);
-               goto err;
-       }
-
        /*
        * Reserve space in the ring buffer for all the commands required to
        * eventually emit this request. This is to guarantee that the
@@ -2781,24 +2788,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        * to be redone if the request is not actually submitted straight
        * away, e.g. because a GPU scheduler has deferred it.
        */
+       req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
        if (i915.enable_execlists)
-               ret = intel_logical_ring_reserve_space(req);
+               ret = intel_logical_ring_alloc_request_extras(req);
        else
-               ret = intel_ring_reserve_space(req);
-       if (ret) {
-               /*
-                * At this point, the request is fully allocated even if not
-                * fully prepared. Thus it can be cleaned up using the proper
-                * free code.
-                */
-               intel_ring_reserved_space_cancel(req->ringbuf);
-               i915_gem_request_unreference(req);
-               return ret;
-       }
+               ret = intel_ring_alloc_request_extras(req);
+       if (ret)
+               goto err_ctx;
 
        *req_out = req;
        return 0;
 
+err_ctx:
+       i915_gem_context_unreference(ctx);
 err:
        kmem_cache_free(dev_priv->requests, req);
        return ret;
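The unwind here is the standard layered-goto shape: err_ctx releases the context reference taken earlier in the function, then control falls through to err, which frees the request itself; each label undoes exactly the steps that succeeded before the failure. Generic sketch of the idiom (hypothetical helpers):

    a = take_a();
    if (!a)
            return -ENOMEM;

    ret = take_b(a);
    if (ret)
            goto err_a;     /* b failed: only a needs undoing */

    return 0;

    err_a:
            release_a(a);
            return ret;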
@@ -2818,13 +2821,13 @@ err:
  */
 struct drm_i915_gem_request *
 i915_gem_request_alloc(struct intel_engine_cs *engine,
-                      struct intel_context *ctx)
+                      struct i915_gem_context *ctx)
 {
        struct drm_i915_gem_request *req;
        int err;
 
        if (ctx == NULL)
-               ctx = to_i915(engine->dev)->kernel_context;
+               ctx = engine->i915->kernel_context;
        err = __i915_gem_request_alloc(engine, ctx, &req);
        return err ? ERR_PTR(err) : req;
 }
@@ -2888,13 +2891,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
                /* Ensure irq handler finishes or is cancelled. */
                tasklet_kill(&engine->irq_tasklet);
 
-               spin_lock_bh(&engine->execlist_lock);
-               /* list_splice_tail_init checks for empty lists */
-               list_splice_tail_init(&engine->execlist_queue,
-                                     &engine->execlist_retired_req_list);
-               spin_unlock_bh(&engine->execlist_lock);
-
-               intel_execlists_retire_requests(engine);
+               intel_execlists_cancel_requests(engine);
        }
 
        /*
@@ -3005,9 +3002,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 }
 
 bool
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        bool idle = true;
 
@@ -3018,8 +3014,6 @@ i915_gem_retire_requests(struct drm_device *dev)
                        spin_lock_bh(&engine->execlist_lock);
                        idle &= list_empty(&engine->execlist_queue);
                        spin_unlock_bh(&engine->execlist_lock);
-
-                       intel_execlists_retire_requests(engine);
                }
        }
 
@@ -3042,7 +3036,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
        /* Come back later if the device is busy... */
        idle = false;
        if (mutex_trylock(&dev->struct_mutex)) {
-               idle = i915_gem_retire_requests(dev);
+               idle = i915_gem_retire_requests(dev_priv);
                mutex_unlock(&dev->struct_mutex);
        }
        if (!idle)
@@ -3066,7 +3060,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
        * Also locking seems to be fubar here, engine->request_list is protected
        * by dev->struct_mutex. */
 
-       intel_mark_idle(dev);
+       intel_mark_idle(dev_priv);
 
        if (mutex_trylock(&dev->struct_mutex)) {
                for_each_engine(engine, dev_priv)
@@ -3096,14 +3090,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
                if (req == NULL)
                        continue;
 
-               if (list_empty(&req->list))
-                       goto retire;
-
-               if (i915_gem_request_completed(req, true)) {
-                       __i915_gem_request_retire__upto(req);
-retire:
+               if (i915_gem_request_completed(req, true))
                        i915_gem_object_retire__read(obj, i);
-               }
        }
 
        return 0;
@@ -3185,7 +3173,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                        ret = __i915_wait_request(req[i], true,
                                                  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                                  to_rps_client(file));
-               i915_gem_request_unreference__unlocked(req[i]);
+               i915_gem_request_unreference(req[i]);
        }
        return ret;
 
@@ -3211,7 +3199,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (i915_gem_request_completed(from_req, true))
                return 0;
 
-       if (!i915_semaphore_is_enabled(obj->base.dev)) {
+       if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
                ret = __i915_wait_request(from_req,
                                          i915->mm.interruptible,
@@ -3345,6 +3333,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
                            old_write_domain);
 }
 
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+       GEM_BUG_ON(vma->pin_count);
+
+       if (vma->iomap == NULL)
+               return;
+
+       io_mapping_unmap(vma->iomap);
+       vma->iomap = NULL;
+}
+
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
        struct drm_i915_gem_object *obj = vma->obj;
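__i915_vma_iounmap() is the release half of a lazily created, cached GTT iomapping: the mapping persists on the vma across uses and is only torn down at unbind, with the GEM_BUG_ON asserting that nothing still holds a pin. A hedged sketch of the acquire side it pairs with (assumed shape; the series adds i915_vma_pin_iomap() along these lines, and ggtt here is a hypothetical local):

    if (vma->iomap == NULL) {
            void __iomem *ptr;

            ptr = io_mapping_map_wc(ggtt->mappable, vma->node.start);
            if (ptr == NULL)
                    return ERR_PTR(-ENOMEM);
            vma->iomap = ptr;
    }

    vma->pin_count++;       /* keeps __i915_vma_iounmap() away until unpinned */
    return vma->iomap;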
@@ -3377,6 +3376,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
                ret = i915_gem_object_put_fence(obj);
                if (ret)
                        return ret;
+
+               __i915_vma_iounmap(vma);
        }
 
        trace_i915_vma_unbind(vma);
@@ -3731,7 +3732,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
                return;
 
        if (i915_gem_clflush_object(obj, obj->pin_display))
-               i915_gem_chipset_flush(obj->base.dev);
+               i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
@@ -3929,7 +3930,7 @@ out:
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
                if (i915_gem_clflush_object(obj, true))
-                       i915_gem_chipset_flush(obj->base.dev);
+                       i915_gem_chipset_flush(to_i915(obj->base.dev));
        }
 
        return 0;
@@ -4198,7 +4199,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
-       i915_gem_request_unreference__unlocked(target);
+       i915_gem_request_unreference(target);
 
        return ret;
 }
@@ -4499,21 +4500,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .put_pages = i915_gem_object_put_pages_gtt,
 };
 
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
                                                  size_t size)
 {
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
        gfp_t mask;
+       int ret;
 
        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
-       if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-               i915_gem_object_free(obj);
-               return NULL;
-       }
+       ret = drm_gem_object_init(dev, &obj->base, size);
+       if (ret)
+               goto fail;
 
        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4550,6 +4551,11 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
        trace_i915_gem_object_create(obj);
 
        return obj;
+
+fail:
+       i915_gem_object_free(obj);
+
+       return ERR_PTR(ret);
 }
 
 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@@ -4655,16 +4661,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
                                           const struct i915_ggtt_view *view)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
-       BUG_ON(!view);
+       GEM_BUG_ON(!view);
 
        list_for_each_entry(vma, &obj->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
        return NULL;
 }
@@ -4706,9 +4708,10 @@ i915_gem_suspend(struct drm_device *dev)
        if (ret)
                goto err;
 
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev_priv);
 
        i915_gem_stop_engines(dev);
+       i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4727,37 +4730,6 @@ err:
        return ret;
 }
 
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
-{
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
-       int i, ret;
-
-       if (!HAS_L3_DPF(dev) || !remap_info)
-               return 0;
-
-       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
-       if (ret)
-               return ret;
-
-       /*
-        * Note: We do not worry about the concurrent register cacheline hang
-        * here because no other code should access these registers other than
-        * at initialization time.
-        */
-       for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-               intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-               intel_ring_emit(engine, remap_info[i]);
-       }
-
-       intel_ring_advance(engine);
-
-       return ret;
-}
-
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4862,7 +4834,7 @@ i915_gem_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
-       int ret, j;
+       int ret;
 
        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4914,13 +4886,10 @@ i915_gem_init_hw(struct drm_device *dev)
        intel_mocs_init_l3cc_table(dev);
 
        /* We can't enable contexts until all firmware is loaded */
-       if (HAS_GUC_UCODE(dev)) {
-               ret = intel_guc_ucode_load(dev);
-               if (ret) {
-                       DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
-                       ret = -EIO;
+       if (HAS_GUC(dev)) {
+               ret = intel_guc_setup(dev);
+               if (ret)
                        goto out;
-               }
        }
 
@@ -4928,44 +4897,6 @@ i915_gem_init_hw(struct drm_device *dev)
        * on re-initialisation
        */
        ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
-       if (ret)
-               goto out;
-
-       /* Now it is safe to go back round and do everything else: */
-       for_each_engine(engine, dev_priv) {
-               struct drm_i915_gem_request *req;
-
-               req = i915_gem_request_alloc(engine, NULL);
-               if (IS_ERR(req)) {
-                       ret = PTR_ERR(req);
-                       break;
-               }
-
-               if (engine->id == RCS) {
-                       for (j = 0; j < NUM_L3_SLICES(dev); j++) {
-                               ret = i915_gem_l3_remap(req, j);
-                               if (ret)
-                                       goto err_request;
-                       }
-               }
-
-               ret = i915_ppgtt_init_ring(req);
-               if (ret)
-                       goto err_request;
-
-               ret = i915_gem_context_enable(req);
-               if (ret)
-                       goto err_request;
-
-err_request:
-               i915_add_request_no_flush(req);
-               if (ret) {
-                       DRM_ERROR("Failed to enable %s, error=%d\n",
-                                 engine->name, ret);
-                       i915_gem_cleanup_engines(dev);
-                       break;
-               }
-       }
 
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -4977,9 +4908,6 @@ int i915_gem_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       i915.enable_execlists = intel_sanitize_enable_execlists(dev,
-                       i915.enable_execlists);
-
        mutex_lock(&dev->struct_mutex);
 
        if (!i915.enable_execlists) {
@@ -5002,10 +4930,7 @@ int i915_gem_init(struct drm_device *dev)
        */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       ret = i915_gem_init_userptr(dev);
-       if (ret)
-               goto out_unlock;
-
+       i915_gem_init_userptr(dev_priv);
        i915_gem_init_ggtt(dev);
 
        ret = i915_gem_context_init(dev);
@@ -5042,14 +4967,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
 
        for_each_engine(engine, dev_priv)
                dev_priv->gt.cleanup_engine(engine);
-
-       if (i915.enable_execlists)
-               /*
-                * Neither the BIOS, ourselves or any other kernel
-                * expects the system to be in execlists mode on startup,
-                * so we need to reset the GPU back to legacy mode.
-                */
-               intel_gpu_reset(dev, ALL_ENGINES);
 }
 
 static void
@@ -5073,7 +4990,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
        else
                dev_priv->num_fence_regs = 8;
 
-       if (intel_vgpu_active(dev))
+       if (intel_vgpu_active(dev_priv))
                dev_priv->num_fence_regs =
                                I915_READ(vgtif_reg(avail_rs.fence_num));
 
@@ -5148,6 +5065,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
        kmem_cache_destroy(dev_priv->objects);
 }
 
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       /* Called just before we write the hibernation image.
+        *
+        * We need to update the domain tracking to reflect that the CPU
+        * will be accessing all the pages to create and restore from the
+        * hibernation, and so upon restoration those pages will be in the
+        * CPU domain.
+        *
+        * To make sure the hibernation image contains the latest state,
+        * we update that state just before writing out the image.
+        */
+
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       return 0;
+}
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
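The comment block explains the why: hibernation reads and rewrites every page with the CPU, so the domain tracking must already say CPU, or the first GPU use after resume could skip a required flush. A hedged sketch of how such a hook is typically wired into the driver's PM ops (assumed wiring, not part of this hunk; kdev_to_i915() is a hypothetical accessor):

    static int i915_pm_freeze_late(struct device *kdev)
    {
            struct drm_i915_private *dev_priv = kdev_to_i915(kdev);

            return i915_gem_freeze_late(dev_priv);
    }

    static const struct dev_pm_ops i915_pm_ops = {
            /* ... */
            .freeze_late = i915_pm_freeze_late,
    };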
@@ -5254,13 +5199,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
 
        WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +5228,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
+               if (vma->is_ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
                        return true;
@@ -5310,23 +5250,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
        return false;
 }
 
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-                               struct i915_address_space *vm)
+unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-       BUG_ON(list_empty(&o->vma_list));
+       GEM_BUG_ON(list_empty(&o->vma_list));
 
        list_for_each_entry(vma, &o->vma_list, obj_link) {
                if (vma->is_ggtt &&
-                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-                       continue;
-               if (vma->vm == vm)
+                   vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
                        return vma->node.size;
        }
+
        return 0;
 }
 
@@ -5365,8 +5300,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
        size_t bytes;
        int ret;
 
-       obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
-       if (IS_ERR_OR_NULL(obj))
+       obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+       if (IS_ERR(obj))
                return obj;
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 7bf2f3f2968e..3752d5daa4b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
        if (obj == NULL) {
                int ret;
 
-               obj = i915_gem_alloc_object(pool->dev, size);
-               if (obj == NULL)
-                       return ERR_PTR(-ENOMEM);
+               obj = i915_gem_object_create(pool->dev, size);
+               if (IS_ERR(obj))
+                       return obj;
 
                ret = i915_gem_object_get_pages(obj);
                if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e5acc3916f75..a3b11aac23a4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"

+#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
+
 /* This is a HW constraint. The value below is the largest known requirement
  * I've seen in a spec to date, and that was a workaround for a non-shipping
  * part. It should be safe to decrease this, but it's more future proof as is.
@@ -97,28 +99,27 @@
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096

-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev_priv))
 		return GEN6_CONTEXT_ALIGN;

 	return GEN7_CONTEXT_ALIGN;
 }

-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 	u32 reg;

-	switch (INTEL_INFO(dev)->gen) {
+	switch (INTEL_GEN(dev_priv)) {
 	case 6:
 		reg = I915_READ(CXT_SIZE);
 		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev_priv))
 			ret = HSW_CXT_TOTAL_SIZE;
 		else
 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev)
 	return ret;
 }

-static void i915_gem_context_clean(struct intel_context *ctx)
+static void i915_gem_context_clean(struct i915_gem_context *ctx)
 {
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	struct i915_vma *vma, *next;
@@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx)

 void i915_gem_context_free(struct kref *ctx_ref)
 {
-	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+	int i;

+	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
 	trace_i915_context_free(ctx);

-	if (i915.enable_execlists)
-		intel_lr_context_free(ctx);
-
 	/*
 	 * This context is going away and we need to remove all VMAs still
 	 * around. This is to handle imported shared objects for which
@@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref)

 	i915_ppgtt_put(ctx->ppgtt);

-	if (ctx->legacy_hw_ctx.rcs_state)
-		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_context *ce = &ctx->engine[i];
+
+		if (!ce->state)
+			continue;
+
+		WARN_ON(ce->pin_count);
+		if (ce->ringbuf)
+			intel_ringbuffer_free(ce->ringbuf);
+
+		drm_gem_object_unreference(&ce->state->base);
+	}
+
 	list_del(&ctx->link);
+
+	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
 	kfree(ctx);
 }

@@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	struct drm_i915_gem_object *obj;
 	int ret;

-	obj = i915_gem_alloc_object(dev, size);
-	if (obj == NULL)
-		return ERR_PTR(-ENOMEM);
+	lockdep_assert_held(&dev->struct_mutex);
+
+	obj = i915_gem_object_create(dev, size);
+	if (IS_ERR(obj))
+		return obj;

 	/*
 	 * Try to make the context utilize L3 as well as LLC.
@@ -209,18 +224,46 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	return obj;
 }

-static struct intel_context *
+static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
+{
+	int ret;
+
+	ret = ida_simple_get(&dev_priv->context_hw_ida,
+			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+	if (ret < 0) {
+		/* Contexts are only released when no longer active.
+		 * Flush any pending retires to hopefully release some
+		 * stale contexts and try again.
+		 */
+		i915_gem_retire_requests(dev_priv);
+		ret = ida_simple_get(&dev_priv->context_hw_ida,
+				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+		if (ret < 0)
+			return ret;
+	}
+
+	*out = ret;
+	return 0;
+}
+
+static struct i915_gem_context *
 __create_hw_context(struct drm_device *dev,
 		    struct drm_i915_file_private *file_priv)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;

 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (ctx == NULL)
 		return ERR_PTR(-ENOMEM);

+	ret = assign_hw_id(dev_priv, &ctx->hw_id);
+	if (ret) {
+		kfree(ctx);
+		return ERR_PTR(ret);
+	}
+
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->context_list);
 	ctx->i915 = dev_priv;
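The assign_hw_id() helper above uses a reserve-then-retry idiom: if the ID space is exhausted, reclaim stale users once and try again before giving up. A minimal standalone sketch of the same idiom; reserve_id() and the reclaim callback are illustrative assumptions, not part of the patch:

#include <linux/gfp.h>
#include <linux/idr.h>

/* Allocate a small integer ID; on exhaustion, invoke the caller's
 * reclaim hook once and retry before reporting failure.
 */
static int reserve_id(struct ida *ida, unsigned int max,
		      void (*reclaim)(void *data), void *data,
		      unsigned int *out)
{
	int ret;

	ret = ida_simple_get(ida, 0, max, GFP_KERNEL);
	if (ret < 0 && reclaim) {
		reclaim(data);		/* drop stale IDs, then retry once */
		ret = ida_simple_get(ida, 0, max, GFP_KERNEL);
	}
	if (ret < 0)
		return ret;

	*out = ret;
	return 0;
}

Retrying only once keeps the allocator bounded: either the reclaim freed an ID and the second attempt succeeds, or the space is genuinely full and the error propagates.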
@@ -232,7 +275,7 @@ __create_hw_context(struct drm_device *dev,
 			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-		ctx->legacy_hw_ctx.rcs_state = obj;
+		ctx->engine[RCS].state = obj;
 	}

 	/* Default context will never have a file_priv */
@@ -249,7 +292,7 @@ __create_hw_context(struct drm_device *dev,
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
 	 * is no remap info, it will be a NOP. */
-	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

 	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

@@ -265,44 +308,27 @@ err_out:
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
-static struct intel_context *
+static struct i915_gem_context *
 i915_gem_create_context(struct drm_device *dev,
 			struct drm_i915_file_private *file_priv)
 {
-	const bool is_global_default_ctx = file_priv == NULL;
-	struct intel_context *ctx;
-	int ret = 0;
+	struct i915_gem_context *ctx;

-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	lockdep_assert_held(&dev->struct_mutex);

 	ctx = __create_hw_context(dev, file_priv);
 	if (IS_ERR(ctx))
 		return ctx;

-	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
-		/* We may need to do things with the shrinker which
-		 * require us to immediately switch back to the default
-		 * context. This can cause a problem as pinning the
-		 * default context also requires GTT space which may not
-		 * be available. To avoid this we always pin the default
-		 * context.
-		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(dev), 0);
-		if (ret) {
-			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
-			goto err_destroy;
-		}
-	}
-
 	if (USES_FULL_PPGTT(dev)) {
 		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

-		if (IS_ERR_OR_NULL(ppgtt)) {
+		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
-			ret = PTR_ERR(ppgtt);
-			goto err_unpin;
+			idr_remove(&file_priv->context_idr, ctx->user_handle);
+			i915_gem_context_unreference(ctx);
+			return ERR_CAST(ppgtt);
 		}

 		ctx->ppgtt = ppgtt;
@@ -311,24 +337,19 @@ i915_gem_create_context(struct drm_device *dev,
 	trace_i915_context_create(ctx);

 	return ctx;
-
-err_unpin:
-	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
-		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
-err_destroy:
-	idr_remove(&file_priv->context_idr, ctx->user_handle);
-	i915_gem_context_unreference(ctx);
-	return ERR_PTR(ret);
 }

-static void i915_gem_context_unpin(struct intel_context *ctx,
+static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 				   struct intel_engine_cs *engine)
 {
 	if (i915.enable_execlists) {
 		intel_lr_context_unpin(ctx, engine);
 	} else {
-		if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
-			i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+		struct intel_context *ce = &ctx->engine[engine->id];
+
+		if (ce->state)
+			i915_gem_object_ggtt_unpin(ce->state);
+
 		i915_gem_context_unreference(ctx);
 	}
 }
@@ -336,51 +357,48 @@ static void i915_gem_context_unpin(struct intel_context *ctx,
 void i915_gem_context_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+
+	lockdep_assert_held(&dev->struct_mutex);

 	if (i915.enable_execlists) {
-		struct intel_context *ctx;
+		struct i915_gem_context *ctx;

 		list_for_each_entry(ctx, &dev_priv->context_list, link)
 			intel_lr_context_reset(dev_priv, ctx);
 	}

-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct intel_engine_cs *engine = &dev_priv->engine[i];
-
-		if (engine->last_context) {
-			i915_gem_context_unpin(engine->last_context, engine);
-			engine->last_context = NULL;
-		}
-	}
-
-	/* Force the GPU state to be reinitialised on enabling */
-	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
+	i915_gem_context_lost(dev_priv);
 }

 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;

 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
 	if (WARN_ON(dev_priv->kernel_context))
 		return 0;

-	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+	if (intel_vgpu_active(dev_priv) &&
+	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		if (!i915.enable_execlists) {
 			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
 			return -EINVAL;
 		}
 	}

+	/* Using the simple ida interface, the max is limited by sizeof(int) */
+	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+	ida_init(&dev_priv->context_hw_ida);
+
 	if (i915.enable_execlists) {
 		/* NB: intentionally left blank. We will allocate our own
 		 * backing objects as we need them, thank you very much */
 		dev_priv->hw_context_size = 0;
-	} else if (HAS_HW_CONTEXTS(dev)) {
-		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+	} else if (HAS_HW_CONTEXTS(dev_priv)) {
+		dev_priv->hw_context_size =
+			round_up(get_context_size(dev_priv), 4096);
 		if (dev_priv->hw_context_size > (1<<20)) {
 			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
 					 dev_priv->hw_context_size);
@@ -395,6 +413,26 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}

+	if (!i915.enable_execlists && ctx->engine[RCS].state) {
+		int ret;
+
+		/* We may need to do things with the shrinker which
+		 * require us to immediately switch back to the default
+		 * context. This can cause a problem as pinning the
+		 * default context also requires GTT space which may not
+		 * be available. To avoid this we always pin the default
+		 * context.
+		 */
+		ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
+					    get_context_alignment(dev_priv), 0);
+		if (ret) {
+			DRM_ERROR("Failed to pin the default global context (error %d)\n",
+				  ret);
+			i915_gem_context_unreference(ctx);
+			return ret;
+		}
+	}
+
 	dev_priv->kernel_context = ctx;

 	DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -403,67 +441,48 @@ int i915_gem_context_init(struct drm_device *dev)
 	return 0;
 }

-void i915_gem_context_fini(struct drm_device *dev)
+void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *dctx = dev_priv->kernel_context;
-	int i;
+	struct intel_engine_cs *engine;

-	if (dctx->legacy_hw_ctx.rcs_state) {
-		/* The only known way to stop the gpu from accessing the hw context is
-		 * to reset it. Do this as the very last operation to avoid confusing
-		 * other code, leading to spurious errors. */
-		intel_gpu_reset(dev, ALL_ENGINES);
-
-		/* When default context is created and switched to, base object refcount
-		 * will be 2 (+1 from object creation and +1 from do_switch()).
-		 * i915_gem_context_fini() will be called after gpu_idle() has switched
-		 * to default context. So we need to unreference the base object once
-		 * to offset the do_switch part, so that i915_gem_context_unreference()
-		 * can then free the base object correctly. */
-		WARN_ON(!dev_priv->engine[RCS].last_context);
-
-		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
-	}
-
-	for (i = I915_NUM_ENGINES; --i >= 0;) {
-		struct intel_engine_cs *engine = &dev_priv->engine[i];
+	lockdep_assert_held(&dev_priv->dev->struct_mutex);

+	for_each_engine(engine, dev_priv) {
 		if (engine->last_context) {
 			i915_gem_context_unpin(engine->last_context, engine);
 			engine->last_context = NULL;
 		}
+
+		/* Force the GPU state to be reinitialised on enabling */
+		dev_priv->kernel_context->engine[engine->id].initialised =
+			engine->init_context == NULL;
 	}

-	i915_gem_context_unreference(dctx);
-	dev_priv->kernel_context = NULL;
+	/* Force the GPU state to be reinitialised on enabling */
+	dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
 }

-int i915_gem_context_enable(struct drm_i915_gem_request *req)
+void i915_gem_context_fini(struct drm_device *dev)
 {
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_gem_context *dctx = dev_priv->kernel_context;

-	if (i915.enable_execlists) {
-		if (engine->init_context == NULL)
-			return 0;
+	lockdep_assert_held(&dev->struct_mutex);

-		ret = engine->init_context(req);
-	} else
-		ret = i915_switch_context(req);
+	if (!i915.enable_execlists && dctx->engine[RCS].state)
+		i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);

-	if (ret) {
-		DRM_ERROR("ring init context: %d\n", ret);
-		return ret;
-	}
+	i915_gem_context_unreference(dctx);
+	dev_priv->kernel_context = NULL;

-	return 0;
+	ida_destroy(&dev_priv->context_hw_ida);
 }

 static int context_idr_cleanup(int id, void *p, void *data)
 {
-	struct intel_context *ctx = p;
+	struct i915_gem_context *ctx = p;

+	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_unreference(ctx);
 	return 0;
 }
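context_idr_cleanup() above shows two small idioms: the file_priv field is poisoned with ERR_PTR(-EBADF) so later users can tell the owning file has gone away, and the idr walk drops the reference the idr itself held. A sketch of the walk-and-release teardown, with illustrative names (struct obj and friends are assumptions, not the i915 types):

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Illustrative refcounted object; stands in for the context. */
struct obj {
	struct kref ref;
};

static void obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct obj, ref));
}

/* idr_for_each() callback: drop the reference the idr was holding. */
static int release_one(int id, void *p, void *data)
{
	struct obj *o = p;

	kref_put(&o->ref, obj_release);
	return 0;	/* 0 means keep walking */
}

static void release_all(struct idr *idr)
{
	idr_for_each(idr, release_one, NULL);
	idr_destroy(idr);
}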
@@ -471,7 +490,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;

 	idr_init(&file_priv->context_idr);

@@ -491,31 +510,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;

+	lockdep_assert_held(&dev->struct_mutex);
+
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
 }

-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
-{
-	struct intel_context *ctx;
-
-	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
-	if (!ctx)
-		return ERR_PTR(-ENOENT);
-
-	return ctx;
-}
-
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915_semaphore_is_enabled(engine->dev) ?
-		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+		i915_semaphore_is_enabled(dev_priv) ?
+		hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
 		0;
 	int len, ret;

@@ -524,21 +534,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(engine->dev)) {
+	if (IS_GEN6(dev_priv)) {
 		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}

 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(engine->dev)->gen < 8)
+	else if (INTEL_GEN(dev_priv) < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);


 	len = 4;
-	if (INTEL_INFO(engine->dev)->gen >= 7)
+	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

 	ret = intel_ring_begin(req, len);
@@ -546,14 +556,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;

 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_GEN(dev_priv) >= 7) {
 		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;

 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, to_i915(engine->dev)) {
+			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;

@@ -568,7 +578,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	intel_ring_emit(engine, MI_NOOP);
 	intel_ring_emit(engine, MI_SET_CONTEXT);
 	intel_ring_emit(engine,
-			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -576,14 +586,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 */
 	intel_ring_emit(engine, MI_NOOP);

-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */

 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, to_i915(engine->dev)) {
+			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;

@@ -609,45 +619,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	return ret;
 }

-static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
-				   struct intel_context *to)
+static int remap_l3(struct drm_i915_gem_request *req, int slice)
+{
+	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
+	struct intel_engine_cs *engine = req->engine;
+	int i, ret;
+
+	if (!remap_info)
+		return 0;
+
+	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+	if (ret)
+		return ret;
+
+	/*
+	 * Note: We do not worry about the concurrent register cacheline hang
+	 * here because no other code should access these registers other than
+	 * at initialization time.
+	 */
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+		intel_ring_emit(engine, remap_info[i]);
+	}
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
+
+	return 0;
+}
+
+static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
+				   struct intel_engine_cs *engine,
+				   struct i915_gem_context *to)
 {
 	if (to->remap_slice)
 		return false;

-	if (!to->legacy_hw_ctx.initialized)
+	if (!to->engine[RCS].initialised)
 		return false;

-	if (to->ppgtt &&
-	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
 		return false;

 	return to == engine->last_context;
 }

 static bool
-needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
+		  struct intel_engine_cs *engine,
+		  struct i915_gem_context *to)
 {
-	if (!to->ppgtt)
+	if (!ppgtt)
 		return false;

+	/* Always load the ppgtt on first use */
+	if (!engine->last_context)
+		return true;
+
+	/* Same context without new entries, skip */
 	if (engine->last_context == to &&
-	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
 		return false;

 	if (engine->id != RCS)
 		return true;

-	if (INTEL_INFO(engine->dev)->gen < 8)
+	if (INTEL_GEN(engine->i915) < 8)
 		return true;

 	return false;
 }

 static bool
-needs_pd_load_post(struct intel_context *to, u32 hw_flags)
+needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
+		   struct i915_gem_context *to,
+		   u32 hw_flags)
 {
-	if (!to->ppgtt)
+	if (!ppgtt)
 		return false;

 	if (!IS_GEN8(to->i915))
@@ -661,18 +709,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags)

 static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
-	struct intel_context *to = req->ctx;
+	struct i915_gem_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_context *from;
+	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+	struct i915_gem_context *from;
 	u32 hw_flags;
 	int ret, i;

-	if (skip_rcs_switch(engine, to))
+	if (skip_rcs_switch(ppgtt, engine, to))
 		return 0;

 	/* Trying to pin first makes error handling easier. */
-	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-				    get_context_alignment(engine->dev),
+	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
+				    get_context_alignment(engine->i915),
 				    0);
 	if (ret)
 		return ret;
@@ -694,37 +743,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
 	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
+	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
 	if (ret)
 		goto unpin_out;

-	if (needs_pd_load_pre(engine, to)) {
+	if (needs_pd_load_pre(ppgtt, engine, to)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
 		 * Register Immediate commands in Ring Buffer before submitting
 		 * a context."*/
 		trace_switch_mm(engine, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		ret = ppgtt->switch_mm(ppgtt, req);
 		if (ret)
 			goto unpin_out;
 	}

-	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
 		/* NB: If we inhibit the restore, the context is not allowed to
 		 * die because future work may end up depending on valid address
 		 * space. This means we must enforce that a page table load
 		 * occur when this occurs. */
 		hw_flags = MI_RESTORE_INHIBIT;
-	else if (to->ppgtt &&
-		 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
 		hw_flags = MI_FORCE_RESTORE;
 	else
 		hw_flags = 0;

-	/* We should never emit switch_mm more than once */
-	WARN_ON(needs_pd_load_pre(engine, to) &&
-		needs_pd_load_post(to, hw_flags));
-
 	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
 		ret = mi_set_context(req, hw_flags);
 		if (ret)
@@ -738,8 +782,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
+		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -747,10 +791,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->legacy_hw_ctx.rcs_state->dirty = 1;
+		from->engine[RCS].state->dirty = 1;

 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
 		i915_gem_context_unreference(from);
 	}
 	i915_gem_context_reference(to);
@@ -759,9 +803,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	/* GEN8 does *not* require an explicit reload if the PDPs have been
 	 * setup, and we do not wish to move them.
 	 */
-	if (needs_pd_load_post(to, hw_flags)) {
+	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
 		trace_switch_mm(engine, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		ret = ppgtt->switch_mm(ppgtt, req);
 		/* The hardware context switch is emitted, but we haven't
 		 * actually changed the state - so it's probably safe to bail
 		 * here. Still, let the user know something dangerous has
@@ -771,33 +815,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		return ret;
 	}

-	if (to->ppgtt)
-		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+	if (ppgtt)
+		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

 	for (i = 0; i < MAX_L3_SLICES; i++) {
 		if (!(to->remap_slice & (1<<i)))
 			continue;

-		ret = i915_gem_l3_remap(req, i);
+		ret = remap_l3(req, i);
 		if (ret)
 			return ret;

 		to->remap_slice &= ~(1<<i);
 	}

-	if (!to->legacy_hw_ctx.initialized) {
+	if (!to->engine[RCS].initialised) {
 		if (engine->init_context) {
 			ret = engine->init_context(req);
 			if (ret)
 				return ret;
 		}
-		to->legacy_hw_ctx.initialized = true;
+		to->engine[RCS].initialised = true;
 	}

 	return 0;

 unpin_out:
-	i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
 	return ret;
 }

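The remap_slice handling in do_rcs_switch() above is a bitmask work list: each set bit marks a slice that still needs servicing, and a bit is cleared only after its work succeeds, so a failed pass is retried on the next switch. A minimal sketch of the pattern; process_slice() and the names here are illustrative, not the i915 functions:

#include <stdint.h>

#define MAX_SLICES 2

/* Stand-in for remap_l3(): would emit the per-slice commands. */
static int process_slice(int slice)
{
	return 0;
}

static int flush_pending(uint32_t *pending)
{
	int i, ret;

	for (i = 0; i < MAX_SLICES; i++) {
		if (!(*pending & (1u << i)))
			continue;

		ret = process_slice(i);
		if (ret)
			return ret;	/* bit stays set, retried later */

		*pending &= ~(1u << i);
	}
	return 0;
}

Clearing the bit after, not before, the work makes the mask itself the retry bookkeeping, which is why __create_hw_context() seeds it with ALL_L3_SLICES.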
@@ -817,25 +861,24 @@ unpin_out:
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = req->i915;

 	WARN_ON(i915.enable_execlists);
-	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+	lockdep_assert_held(&req->i915->dev->struct_mutex);

-	if (engine->id != RCS ||
-	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
-		struct intel_context *to = req->ctx;
+	if (!req->ctx->engine[engine->id].state) {
+		struct i915_gem_context *to = req->ctx;
+		struct i915_hw_ppgtt *ppgtt =
+			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

-		if (needs_pd_load_pre(engine, to)) {
+		if (needs_pd_load_pre(ppgtt, engine, to)) {
 			int ret;

 			trace_switch_mm(engine, to);
-			ret = to->ppgtt->switch_mm(to->ppgtt, req);
+			ret = ppgtt->switch_mm(ppgtt, req);
 			if (ret)
 				return ret;

-			/* Doing a PD load always reloads the page dirs */
-			to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 		}

 		if (to != engine->last_context) {
@@ -861,7 +904,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;

 	if (!contexts_enabled(dev))
@@ -890,7 +933,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_context_destroy *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;

 	if (args->pad != 0)
@@ -903,13 +946,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;

-	ctx = i915_gem_context_get(file_priv, args->ctx_id);
+	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
 		return PTR_ERR(ctx);
 	}

-	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
+	idr_remove(&file_priv->context_idr, ctx->user_handle);
 	i915_gem_context_unreference(ctx);
 	mutex_unlock(&dev->struct_mutex);

@@ -922,14 +965,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_gem_context_param *args = data;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;

 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;

-	ctx = i915_gem_context_get(file_priv, args->ctx_id);
+	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
 		return PTR_ERR(ctx);
@@ -965,14 +1008,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_gem_context_param *args = data;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;

 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;

-	ctx = i915_gem_context_get(file_priv, args->ctx_id);
+	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
 		return PTR_ERR(ctx);
@@ -1004,3 +1047,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,

 	return ret;
 }
+
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
+				       void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reset_stats *args = data;
+	struct i915_ctx_hang_stats *hs;
+	struct i915_gem_context *ctx;
+	int ret;
+
+	if (args->flags || args->pad)
+		return -EINVAL;
+
+	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
+	if (IS_ERR(ctx)) {
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR(ctx);
+	}
+	hs = &ctx->hang_stats;
+
+	if (capable(CAP_SYS_ADMIN))
+		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+	else
+		args->reset_count = 0;
+
+	args->batch_active = hs->batch_active;
+	args->batch_pending = hs->batch_pending;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
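The reset-stats ioctl above gates global information behind CAP_SYS_ADMIN while still returning per-context data to everyone. A minimal sketch of that gating; the args layout and counter below are illustrative assumptions, not the uapi struct:

#include <linux/capability.h>
#include <linux/types.h>

struct stats_args {
	__u32 reset_count;
};

static unsigned long global_reset_count;

/* Per-context fields would be filled unconditionally; the device-wide
 * counter is only disclosed to privileged callers.
 */
static void fill_stats(struct stats_args *args)
{
	args->reset_count = capable(CAP_SYS_ADMIN) ? global_reset_count : 0;
}

Returning zero rather than an error keeps the ioctl usable for unprivileged clients that only care about their own hang statistics.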
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ea1f8d1bd228..b144c3f5c650 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -154,7 +154,7 @@ none:
 		if (ret)
 			return ret;

-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(to_i915(dev));
 		goto search_again;
 	}

@@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 		if (ret)
 			return ret;

-		i915_gem_retire_requests(vm->dev);
+		i915_gem_retire_requests(to_i915(vm->dev));

 		WARN_ON(!list_empty(&vm->active_list));
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33df74d98269..8097698b9622 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 			    struct list_head *vmas,
-			    struct intel_context *ctx,
+			    struct i915_gem_context *ctx,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
 	int retry;

 	i915_gem_retire_requests_ring(engine);
@@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct intel_engine_cs *engine,
 				  struct eb_vmas *eb,
 				  struct drm_i915_gem_exec_object2 *exec,
-				  struct intel_context *ctx)
+				  struct i915_gem_context *ctx)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct i915_address_space *vm;
@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	}

 	if (flush_chipset)
-		i915_gem_chipset_flush(req->engine->dev);
+		i915_gem_chipset_flush(req->engine->i915);

 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev,
 	return 0;
 }

-static struct intel_context *
+static struct i915_gem_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
-	struct intel_context *ctx = NULL;
+	struct i915_gem_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;

 	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 		return ERR_PTR(-EINVAL);

-	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
 	if (IS_ERR(ctx))
 		return ctx;

@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 		return ERR_PTR(-EIO);
 	}

-	if (i915.enable_execlists && !ctx->engine[engine->id].state) {
-		int ret = intel_lr_context_deferred_alloc(ctx, engine);
-		if (ret) {
-			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
-			return ERR_PTR(ret);
-		}
-	}
-
 	return ctx;
 }

@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(engine->dev);
+				struct drm_i915_private *dev_priv = engine->i915;
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
@@ -1436,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
 	struct intel_engine_cs *engine;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	struct i915_address_space *vm;
 	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
 	struct i915_execbuffer_params *params = &params_master;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a2b938ec01a7..2b6bdc267fb5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 	int i;

 	if (obj->bit_17 == NULL)
 		return;

 	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
 		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;

@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 	}

 	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
+		if (page_to_phys(page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
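The conversions above replace open-coded sg_page_iter loops with the driver-local for_each_sgt_page()/for_each_sgt_dma() wrappers. A sketch of what the page variant boils down to in terms of the stock scatterlist API; walk_pages() is an illustrative name, and the real i915 macro also covers the coalesced DMA-address case:

#include <linux/scatterlist.h>

/* Visit every backing struct page of an sg_table. */
static void walk_pages(struct sg_table *st)
{
	struct sg_page_iter iter;
	struct page *page;

	for_each_sg_page(st->sgl, &iter, st->nents, 0) {
		page = sg_page_iter_page(&iter);
		/* operate on page, e.g. test page_to_phys(page) bits */
	}
}

Hoisting the iterator and page variable to the top of the function, as the patch does, is what lets the wrapper macro declare nothing itself.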
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 92acdff9dad3..46684779d4d6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -93,6 +93,13 @@
  *
  */

+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+	GEM_BUG_ON(!i915_is_ggtt(vm));
+	return container_of(vm, struct i915_ggtt, base);
+}
+
 static int
 i915_get_ggtt_vma_pages(struct i915_vma *vma);

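i915_vm_to_ggtt() above is the classic container_of() downcast: recover the enclosing structure from a pointer to one of its embedded members, guarded by an assertion that the pointer really is of the expected kind. A standalone sketch with illustrative struct names (the kernel already provides container_of() in <linux/kernel.h>; it is expanded here only to show the mechanics):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base {
	int id;
};

struct derived {
	int extra;
	struct base base;	/* embedded member */
};

static struct derived *to_derived(struct base *b)
{
	/* Subtract the member offset to find the start of the container. */
	return container_of(b, struct derived, base);
}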
@@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
 	.type = I915_GGTT_VIEW_ROTATED,
 };

-static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+				int enable_ppgtt)
 {
 	bool has_aliasing_ppgtt;
 	bool has_full_ppgtt;
 	bool has_full_48bit_ppgtt;

-	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
-	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
-	has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
+	has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
+	has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
+	has_full_48bit_ppgtt =
+		IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;

-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		has_full_ppgtt = false; /* emulation is too hard */

+	if (!has_aliasing_ppgtt)
+		return 0;
+
 	/*
 	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
 	 * execlists, the sole mechanism available to submit work.
 	 */
-	if (INTEL_INFO(dev)->gen < 9 &&
-	    (enable_ppgtt == 0 || !has_aliasing_ppgtt))
+	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
 		return 0;

 	if (enable_ppgtt == 1)
@@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)

 #ifdef CONFIG_INTEL_IOMMU
 	/* Disable ppgtt on SNB if VT-d is on. */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
 		DRM_INFO("Disabling PPGTT because VT-d is on\n");
 		return 0;
 	}
 #endif

 	/* Early VLV doesn't have this */
-	if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+	if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
 		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
 		return 0;
 	}

-	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
 		return has_full_48bit_ppgtt ? 3 : 2;
 	else
 		return has_aliasing_ppgtt ? 1 : 0;
@@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
 static int gen8_init_scratch(struct i915_address_space *vm)
 {
 	struct drm_device *dev = vm->dev;
+	int ret;

 	vm->scratch_page = alloc_scratch_page(dev);
 	if (IS_ERR(vm->scratch_page))
@@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)

 	vm->scratch_pt = alloc_pt(dev);
 	if (IS_ERR(vm->scratch_pt)) {
-		free_scratch_page(dev, vm->scratch_page);
-		return PTR_ERR(vm->scratch_pt);
+		ret = PTR_ERR(vm->scratch_pt);
+		goto free_scratch_page;
 	}

 	vm->scratch_pd = alloc_pd(dev);
 	if (IS_ERR(vm->scratch_pd)) {
-		free_pt(dev, vm->scratch_pt);
-		free_scratch_page(dev, vm->scratch_page);
-		return PTR_ERR(vm->scratch_pd);
+		ret = PTR_ERR(vm->scratch_pd);
+		goto free_pt;
 	}

 	if (USES_FULL_48BIT_PPGTT(dev)) {
 		vm->scratch_pdp = alloc_pdp(dev);
 		if (IS_ERR(vm->scratch_pdp)) {
-			free_pd(dev, vm->scratch_pd);
-			free_pt(dev, vm->scratch_pt);
-			free_scratch_page(dev, vm->scratch_page);
-			return PTR_ERR(vm->scratch_pdp);
+			ret = PTR_ERR(vm->scratch_pdp);
+			goto free_pd;
 		}
 	}

@@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	gen8_initialize_pdp(vm, vm->scratch_pdp);

 	return 0;
+
+free_pd:
+	free_pd(dev, vm->scratch_pd);
+free_pt:
+	free_pt(dev, vm->scratch_pt);
+free_scratch_page:
+	free_scratch_page(dev, vm->scratch_page);
+
+	return ret;
 }

 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
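The gen8_init_scratch() rework above replaces duplicated per-branch cleanup with a single goto ladder that unwinds, in reverse order, exactly what was set up before the failing step. A standalone sketch of the pattern; the three resources and names are illustrative:

#include <stdlib.h>

static int setup_three(void **a, void **b, void **c)
{
	int ret = -1;

	*a = malloc(16);
	if (!*a)
		return ret;

	*b = malloc(16);
	if (!*b)
		goto free_a;

	*c = malloc(16);
	if (!*c)
		goto free_b;

	return 0;

	/* Unwind labels mirror the setup order, reversed. */
free_b:
	free(*b);
free_a:
	free(*a);
	return ret;
}

Each new acquisition only has to add one label and one goto, so the cleanup cannot drift out of sync with the setup the way the copied free lists in the old code could.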
@@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

-	if (intel_vgpu_active(vm->dev))
+	if (intel_vgpu_active(to_i915(vm->dev)))
 		gen8_ppgtt_notify_vgt(ppgtt, false);

 	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 						0, 0,
 						GEN8_PML4E_SHIFT);

-		if (intel_vgpu_active(ppgtt->base.dev)) {
+		if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
 			ret = gen8_preallocate_top_level_pdps(ppgtt);
 			if (ret)
 				goto free_scratch;
 		}
 	}

-	if (intel_vgpu_active(ppgtt->base.dev))
+	if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
 		gen8_ppgtt_notify_vgt(ppgtt, true);

 	return 0;
@@ -1821,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 				      enum i915_cache_level cache_level, u32 flags)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-	gen6_pte_t *pt_vaddr;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned act_pt = first_entry / GEN6_PTES;
 	unsigned act_pte = first_entry % GEN6_PTES;
-	struct sg_page_iter sg_iter;
+	gen6_pte_t *pt_vaddr = NULL;
+	struct sgt_iter sgt_iter;
+	dma_addr_t addr;

-	pt_vaddr = NULL;
-	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+	for_each_sgt_dma(addr, sgt_iter, pages) {
 		if (pt_vaddr == NULL)
 			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

 		pt_vaddr[act_pte] =
-			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
-				       cache_level, true, flags);
+			vm->pte_encode(addr, cache_level, true, flags);

 		if (++act_pte == GEN6_PTES) {
 			kunmap_px(ppgtt, pt_vaddr);
@@ -1843,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 			act_pte = 0;
 		}
 	}
+
 	if (pt_vaddr)
 		kunmap_px(ppgtt, pt_vaddr);
 }
@@ -2064,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	} else
 		BUG();

-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		ppgtt->switch_mm = vgpu_mm_switch;

 	ret = gen6_ppgtt_alloc(ppgtt);
@@ -2140,7 +2158,7 @@ static void gtt_write_workarounds(struct drm_device *dev)
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
 }

-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
@@ -2179,20 +2197,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 	return 0;
 }

-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
-{
-	struct drm_i915_private *dev_priv = req->i915;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-	if (i915.enable_execlists)
-		return 0;
-
-	if (!ppgtt)
-		return 0;
-
-	return ppgtt->switch_mm(ppgtt, req);
-}
-
 struct i915_hw_ppgtt *
 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
 {
@@ -2275,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 	dev_priv->mm.interruptible = interruptible;
 }

-void i915_check_and_clear_faults(struct drm_device *dev)
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;

-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_INFO(dev_priv)->gen < 6)
 		return;

 	for_each_engine(engine, dev_priv) {
@@ -2324,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen < 6)
 		return;

-	i915_check_and_clear_faults(dev);
+	i915_check_and_clear_faults(dev_priv);

 	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
 			       true);
@@ -2358,23 +2361,21 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     enum i915_cache_level level, u32 unused)
 {
 	struct drm_i915_private *dev_priv = to_i915(vm->dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	unsigned first_entry = start >> PAGE_SHIFT;
-	gen8_pte_t __iomem *gtt_entries =
-		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
-	int i = 0;
-	struct sg_page_iter sg_iter;
-	dma_addr_t addr = 0; /* shut up gcc */
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	struct sgt_iter sgt_iter;
+	gen8_pte_t __iomem *gtt_entries;
+	gen8_pte_t gtt_entry;
+	dma_addr_t addr;
 	int rpm_atomic_seq;
+	int i = 0;

 	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

-	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
-		addr = sg_dma_address(sg_iter.sg) +
-			(sg_iter.sg_pgoffset << PAGE_SHIFT);
-		gen8_set_pte(&gtt_entries[i],
-			     gen8_pte_encode(addr, level, true));
-		i++;
+	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+	for_each_sgt_dma(addr, sgt_iter, st) {
+		gtt_entry = gen8_pte_encode(addr, level, true);
+		gen8_set_pte(&gtt_entries[i++], gtt_entry);
 	}

 	/*
@@ -2385,8 +2386,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	 * hardware should work, we must keep this posting read for paranoia.
 	 */
 	if (i != 0)
-		WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
-			!= gen8_pte_encode(addr, level, true));
+		WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);

 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2436,21 +2436,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     enum i915_cache_level level, u32 flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(vm->dev);
2439 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2439 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2440 unsigned first_entry = start >> PAGE_SHIFT; 2440 struct sgt_iter sgt_iter;
2441 gen6_pte_t __iomem *gtt_entries = 2441 gen6_pte_t __iomem *gtt_entries;
2442 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; 2442 gen6_pte_t gtt_entry;
2443 int i = 0; 2443 dma_addr_t addr;
2444 struct sg_page_iter sg_iter;
2445 dma_addr_t addr = 0;
2446 int rpm_atomic_seq; 2444 int rpm_atomic_seq;
2445 int i = 0;
2447 2446
2448 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2447 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2449 2448
2450 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 2449 gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2451 addr = sg_page_iter_dma_address(&sg_iter); 2450
2452 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); 2451 for_each_sgt_dma(addr, sgt_iter, st) {
2453 i++; 2452 gtt_entry = vm->pte_encode(addr, level, true, flags);
2453 iowrite32(gtt_entry, &gtt_entries[i++]);
2454 } 2454 }
2455 2455
2456 /* XXX: This serves as a posting read to make sure that the PTE has 2456 /* XXX: This serves as a posting read to make sure that the PTE has
@@ -2459,10 +2459,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2459 * of NUMA access patterns. Therefore, even with the way we assume 2459 * of NUMA access patterns. Therefore, even with the way we assume
2460 * hardware should work, we must keep this posting read for paranoia. 2460 * hardware should work, we must keep this posting read for paranoia.
2461 */ 2461 */
2462 if (i != 0) { 2462 if (i != 0)
2463 unsigned long gtt = readl(&gtt_entries[i-1]); 2463 WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
2464 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
2465 }
2466 2464
2467 /* This next bit makes the above posting read even more important. We 2465 /* This next bit makes the above posting read even more important. We
2468 * want to flush the TLBs only after we're certain all the PTE updates 2466 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2474,13 +2472,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2474 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2472 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2475} 2473}
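
Both insert_entries hunks above share one behavioural tweak worth calling out: the posting read now compares against the cached value of the last PTE written instead of re-encoding it from the final address. A condensed sketch, using only names visible in the gen6 hunk:

	gen6_pte_t gtt_entry = 0;
	int i = 0;

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = vm->pte_encode(addr, level, true, flags);
		iowrite32(gtt_entry, &gtt_entries[i++]);
	}

	/* Read back the last PTE through the same mapping so all prior
	 * writes are posted before the TLB flush that follows. */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i - 1]) != gtt_entry);
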
2476 2474
2475static void nop_clear_range(struct i915_address_space *vm,
2476 uint64_t start,
2477 uint64_t length,
2478 bool use_scratch)
2479{
2480}
2481
2477static void gen8_ggtt_clear_range(struct i915_address_space *vm, 2482static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2478 uint64_t start, 2483 uint64_t start,
2479 uint64_t length, 2484 uint64_t length,
2480 bool use_scratch) 2485 bool use_scratch)
2481{ 2486{
2482 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2487 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2483 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2488 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2484 unsigned first_entry = start >> PAGE_SHIFT; 2489 unsigned first_entry = start >> PAGE_SHIFT;
2485 unsigned num_entries = length >> PAGE_SHIFT; 2490 unsigned num_entries = length >> PAGE_SHIFT;
2486 gen8_pte_t scratch_pte, __iomem *gtt_base = 2491 gen8_pte_t scratch_pte, __iomem *gtt_base =
@@ -2512,7 +2517,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2512 bool use_scratch) 2517 bool use_scratch)
2513{ 2518{
2514 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2519 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2515 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2520 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2516 unsigned first_entry = start >> PAGE_SHIFT; 2521 unsigned first_entry = start >> PAGE_SHIFT;
2517 unsigned num_entries = length >> PAGE_SHIFT; 2522 unsigned num_entries = length >> PAGE_SHIFT;
2518 gen6_pte_t scratch_pte, __iomem *gtt_base = 2523 gen6_pte_t scratch_pte, __iomem *gtt_base =
@@ -2727,7 +2732,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2727 i915_address_space_init(&ggtt->base, dev_priv); 2732 i915_address_space_init(&ggtt->base, dev_priv);
2728 ggtt->base.total += PAGE_SIZE; 2733 ggtt->base.total += PAGE_SIZE;
2729 2734
2730 if (intel_vgpu_active(dev)) { 2735 if (intel_vgpu_active(dev_priv)) {
2731 ret = intel_vgt_balloon(dev); 2736 ret = intel_vgt_balloon(dev);
2732 if (ret) 2737 if (ret)
2733 return ret; 2738 return ret;
@@ -2831,7 +2836,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
2831 i915_gem_cleanup_stolen(dev); 2836 i915_gem_cleanup_stolen(dev);
2832 2837
2833 if (drm_mm_initialized(&ggtt->base.mm)) { 2838 if (drm_mm_initialized(&ggtt->base.mm)) {
2834 if (intel_vgpu_active(dev)) 2839 if (intel_vgpu_active(dev_priv))
2835 intel_vgt_deballoon(); 2840 intel_vgt_deballoon();
2836 2841
2837 drm_mm_takedown(&ggtt->base.mm); 2842 drm_mm_takedown(&ggtt->base.mm);
@@ -3069,14 +3074,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3069 3074
3070 ret = ggtt_probe_common(dev, ggtt->size); 3075 ret = ggtt_probe_common(dev, ggtt->size);
3071 3076
3072 ggtt->base.clear_range = gen8_ggtt_clear_range;
3073 if (IS_CHERRYVIEW(dev_priv))
3074 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3075 else
3076 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3077 ggtt->base.bind_vma = ggtt_bind_vma; 3077 ggtt->base.bind_vma = ggtt_bind_vma;
3078 ggtt->base.unbind_vma = ggtt_unbind_vma; 3078 ggtt->base.unbind_vma = ggtt_unbind_vma;
3079 3079
3080 ggtt->base.clear_range = nop_clear_range;
3081 if (!USES_FULL_PPGTT(dev_priv))
3082 ggtt->base.clear_range = gen8_ggtt_clear_range;
3083
3084 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3085 if (IS_CHERRYVIEW(dev_priv))
3086 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3087
3080 return ret; 3088 return ret;
3081} 3089}
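
The probe hunk above uses a default-then-override idiom for the address-space hooks; presumably the point is that with full PPGTT enabled, scrubbing GGTT PTEs on every unbind buys nothing, so clear_range defaults to the new no-op. Condensed:

	/* Pick the cheap default, then the exceptions: */
	ggtt->base.clear_range = nop_clear_range;
	if (!USES_FULL_PPGTT(dev_priv))
		ggtt->base.clear_range = gen8_ggtt_clear_range;

	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
	if (IS_CHERRYVIEW(dev_priv))
		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
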
3082 3090
@@ -3219,14 +3227,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
3219 if (intel_iommu_gfx_mapped) 3227 if (intel_iommu_gfx_mapped)
3220 DRM_INFO("VT-d active for gfx access\n"); 3228 DRM_INFO("VT-d active for gfx access\n");
3221#endif 3229#endif
3222 /*
3223 * i915.enable_ppgtt is read-only, so do an early pass to validate the
3224 * user's requested state against the hardware/driver capabilities. We
3225 * do this now so that we can print out any log messages once rather
3226 * than every time we check intel_enable_ppgtt().
3227 */
3228 i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
3229 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
3230 3230
3231 return 0; 3231 return 0;
3232 3232
@@ -3250,9 +3250,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3250 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3250 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3251 struct drm_i915_gem_object *obj; 3251 struct drm_i915_gem_object *obj;
3252 struct i915_vma *vma; 3252 struct i915_vma *vma;
3253 bool flush;
3254 3253
3255 i915_check_and_clear_faults(dev); 3254 i915_check_and_clear_faults(dev_priv);
3256 3255
3257 /* First fill our portion of the GTT with scratch pages */ 3256 /* First fill our portion of the GTT with scratch pages */
3258 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, 3257 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
@@ -3260,19 +3259,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3260 3259
3261 /* Cache flush objects bound into GGTT and rebind them. */ 3260 /* Cache flush objects bound into GGTT and rebind them. */
3262 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 3261 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
3263 flush = false;
3264 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3262 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3265 if (vma->vm != &ggtt->base) 3263 if (vma->vm != &ggtt->base)
3266 continue; 3264 continue;
3267 3265
3268 WARN_ON(i915_vma_bind(vma, obj->cache_level, 3266 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3269 PIN_UPDATE)); 3267 PIN_UPDATE));
3270
3271 flush = true;
3272 } 3268 }
3273 3269
3274 if (flush) 3270 if (obj->pin_display)
3275 i915_gem_clflush_object(obj, obj->pin_display); 3271 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3276 } 3272 }
3277 3273
3278 if (INTEL_INFO(dev)->gen >= 8) { 3274 if (INTEL_INFO(dev)->gen >= 8) {
@@ -3398,9 +3394,11 @@ static struct sg_table *
3398intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, 3394intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3399 struct drm_i915_gem_object *obj) 3395 struct drm_i915_gem_object *obj)
3400{ 3396{
3397 const size_t n_pages = obj->base.size / PAGE_SIZE;
3401 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; 3398 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
3402 unsigned int size_pages_uv; 3399 unsigned int size_pages_uv;
3403 struct sg_page_iter sg_iter; 3400 struct sgt_iter sgt_iter;
3401 dma_addr_t dma_addr;
3404 unsigned long i; 3402 unsigned long i;
3405 dma_addr_t *page_addr_list; 3403 dma_addr_t *page_addr_list;
3406 struct sg_table *st; 3404 struct sg_table *st;
@@ -3409,7 +3407,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3409 int ret = -ENOMEM; 3407 int ret = -ENOMEM;
3410 3408
3411 /* Allocate a temporary list of source pages for random access. */ 3409 /* Allocate a temporary list of source pages for random access. */
3412 page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE, 3410 page_addr_list = drm_malloc_gfp(n_pages,
3413 sizeof(dma_addr_t), 3411 sizeof(dma_addr_t),
3414 GFP_TEMPORARY); 3412 GFP_TEMPORARY);
3415 if (!page_addr_list) 3413 if (!page_addr_list)
@@ -3432,11 +3430,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3432 3430
3433 /* Populate source page list from the object. */ 3431 /* Populate source page list from the object. */
3434 i = 0; 3432 i = 0;
3435 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 3433 for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
3436 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); 3434 page_addr_list[i++] = dma_addr;
3437 i++;
3438 }
3439 3435
3436 GEM_BUG_ON(i != n_pages);
3440 st->nents = 0; 3437 st->nents = 0;
3441 sg = st->sgl; 3438 sg = st->sgl;
3442 3439
@@ -3634,3 +3631,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
3634 return obj->base.size; 3631 return obj->base.size;
3635 } 3632 }
3636} 3633}
3634
3635void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
3636{
3637 void __iomem *ptr;
3638
3639 lockdep_assert_held(&vma->vm->dev->struct_mutex);
3640 if (WARN_ON(!vma->obj->map_and_fenceable))
3641 return ERR_PTR(-ENODEV);
3642
3643 GEM_BUG_ON(!vma->is_ggtt);
3644 GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
3645
3646 ptr = vma->iomap;
3647 if (ptr == NULL) {
3648 ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
3649 vma->node.start,
3650 vma->node.size);
3651 if (ptr == NULL)
3652 return ERR_PTR(-ENOMEM);
3653
3654 vma->iomap = ptr;
3655 }
3656
3657 vma->pin_count++;
3658 return ptr;
3659}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0008543d55f6..62be77cac5cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
34#ifndef __I915_GEM_GTT_H__ 34#ifndef __I915_GEM_GTT_H__
35#define __I915_GEM_GTT_H__ 35#define __I915_GEM_GTT_H__
36 36
37#include <linux/io-mapping.h>
38
37struct drm_i915_file_private; 39struct drm_i915_file_private;
38 40
39typedef uint32_t gen6_pte_t; 41typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
175 struct drm_mm_node node; 177 struct drm_mm_node node;
176 struct drm_i915_gem_object *obj; 178 struct drm_i915_gem_object *obj;
177 struct i915_address_space *vm; 179 struct i915_address_space *vm;
180 void __iomem *iomap;
178 181
179 /** Flags and address space this VMA is bound to */ 182 /** Flags and address space this VMA is bound to */
180#define GLOBAL_BIND (1<<0) 183#define GLOBAL_BIND (1<<0)
@@ -518,9 +521,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
518void i915_gem_init_ggtt(struct drm_device *dev); 521void i915_gem_init_ggtt(struct drm_device *dev);
519void i915_ggtt_cleanup_hw(struct drm_device *dev); 522void i915_ggtt_cleanup_hw(struct drm_device *dev);
520 523
521int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
522int i915_ppgtt_init_hw(struct drm_device *dev); 524int i915_ppgtt_init_hw(struct drm_device *dev);
523int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
524void i915_ppgtt_release(struct kref *kref); 525void i915_ppgtt_release(struct kref *kref);
525struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, 526struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
526 struct drm_i915_file_private *fpriv); 527 struct drm_i915_file_private *fpriv);
@@ -535,7 +536,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
535 kref_put(&ppgtt->ref, i915_ppgtt_release); 536 kref_put(&ppgtt->ref, i915_ppgtt_release);
536} 537}
537 538
538void i915_check_and_clear_faults(struct drm_device *dev); 539void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
539void i915_gem_suspend_gtt_mappings(struct drm_device *dev); 540void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
540void i915_gem_restore_gtt_mappings(struct drm_device *dev); 541void i915_gem_restore_gtt_mappings(struct drm_device *dev);
541 542
@@ -560,4 +561,36 @@ size_t
560i915_ggtt_view_size(struct drm_i915_gem_object *obj, 561i915_ggtt_view_size(struct drm_i915_gem_object *obj,
561 const struct i915_ggtt_view *view); 562 const struct i915_ggtt_view *view);
562 563
564/**
565 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
566 * @vma: VMA to iomap
567 *
 568 * The passed-in VMA has to be pinned in the global GTT mappable region.
 569 * An extra pinning of the VMA is acquired for the returned iomapping;
 570 * the caller must call i915_vma_unpin_iomap() to relinquish the pinning
 571 * once the iomapping is no longer required.
572 *
573 * Callers must hold the struct_mutex.
574 *
 575 * Returns a valid iomapped pointer or an ERR_PTR() on failure.
576 */
577void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
578
579/**
 580 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap()
581 * @vma: VMA to unpin
582 *
 583 * Drops the extra pinning previously acquired by i915_vma_pin_iomap().
584 *
585 * Callers must hold the struct_mutex. This function is only valid to be
586 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
587 */
588static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
589{
590 lockdep_assert_held(&vma->vm->dev->struct_mutex);
591 GEM_BUG_ON(vma->pin_count == 0);
592 GEM_BUG_ON(vma->iomap == NULL);
593 vma->pin_count--;
594}
595
563#endif 596#endif
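
To make the new contract concrete, a hypothetical caller could look like the sketch below (assuming vma is already bound into the mappable GGTT, struct_mutex is held, and the register offset and value are placeholders):

	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* CPU write through the aperture */
	writel(0xdeadbeef, (u32 __iomem *)ptr + 4);

	/* Drop the extra pin; the mapping itself stays cached in
	 * vma->iomap until the VMA is unbound. */
	i915_vma_unpin_iomap(vma);

The caching is what the shrinker hunk earlier in this patch is reacting to: cached iomaps consume vmap space, so the vmap notifier unbinds VMAs with a populated iomap to reclaim it.
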
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 71611bf21fca..7c93327b70fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -29,7 +29,7 @@
29#include "intel_renderstate.h" 29#include "intel_renderstate.h"
30 30
31static const struct intel_renderstate_rodata * 31static const struct intel_renderstate_rodata *
32render_state_get_rodata(struct drm_device *dev, const int gen) 32render_state_get_rodata(const int gen)
33{ 33{
34 switch (gen) { 34 switch (gen) {
35 case 6: 35 case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
45 return NULL; 45 return NULL;
46} 46}
47 47
48static int render_state_init(struct render_state *so, struct drm_device *dev) 48static int render_state_init(struct render_state *so,
49 struct drm_i915_private *dev_priv)
49{ 50{
50 int ret; 51 int ret;
51 52
52 so->gen = INTEL_INFO(dev)->gen; 53 so->gen = INTEL_GEN(dev_priv);
53 so->rodata = render_state_get_rodata(dev, so->gen); 54 so->rodata = render_state_get_rodata(so->gen);
54 if (so->rodata == NULL) 55 if (so->rodata == NULL)
55 return 0; 56 return 0;
56 57
57 if (so->rodata->batch_items * 4 > 4096) 58 if (so->rodata->batch_items * 4 > 4096)
58 return -EINVAL; 59 return -EINVAL;
59 60
60 so->obj = i915_gem_alloc_object(dev, 4096); 61 so->obj = i915_gem_object_create(dev_priv->dev, 4096);
61 if (so->obj == NULL) 62 if (IS_ERR(so->obj))
62 return -ENOMEM; 63 return PTR_ERR(so->obj);
63 64
64 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); 65 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
65 if (ret) 66 if (ret)
@@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
177 if (WARN_ON(engine->id != RCS)) 178 if (WARN_ON(engine->id != RCS))
178 return -ENOENT; 179 return -ENOENT;
179 180
180 ret = render_state_init(so, engine->dev); 181 ret = render_state_init(so, engine->i915);
181 if (ret) 182 if (ret)
182 return ret; 183 return ret;
183 184
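
The allocation change above moves callers from NULL checks to the kernel's ERR_PTR convention; the same conversion appears again in the GuC hunk below. The general shape, assuming only what the hunks themselves show about i915_gem_object_create():

	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* e.g. -ENOMEM; no NULL check needed */
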
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 425e721aac58..538c30499848 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
131 unsigned long count = 0; 131 unsigned long count = 0;
132 132
133 trace_i915_gem_shrink(dev_priv, target, flags); 133 trace_i915_gem_shrink(dev_priv, target, flags);
134 i915_gem_retire_requests(dev_priv->dev); 134 i915_gem_retire_requests(dev_priv);
135
136 /*
 137 * Unbinding of objects will require HW access; let us not wake the
138 * device just to recover a little memory. If absolutely necessary,
139 * we will force the wake during oom-notifier.
140 */
141 if ((flags & I915_SHRINK_BOUND) &&
142 !intel_runtime_pm_get_if_in_use(dev_priv))
143 flags &= ~I915_SHRINK_BOUND;
135 144
136 /* 145 /*
137 * As we may completely rewrite the (un)bound list whilst unbinding 146 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
197 list_splice(&still_in_list, phase->list); 206 list_splice(&still_in_list, phase->list);
198 } 207 }
199 208
200 i915_gem_retire_requests(dev_priv->dev); 209 if (flags & I915_SHRINK_BOUND)
210 intel_runtime_pm_put(dev_priv);
211
212 i915_gem_retire_requests(dev_priv);
201 213
202 return count; 214 return count;
203} 215}
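
Condensing the runtime-pm pattern introduced above, assuming only the helpers visible in the hunk: take a wakeref only if the device is already awake, otherwise drop the work that would require waking it.

	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;	/* asleep: skip unbinding */

	/* ... scan the bound/unbound lists as before ... */

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

The oom and vmap notifiers below take the wakeref unconditionally instead, since there the memory has to be recovered at any cost.
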
@@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
345 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) 357 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
346 return NOTIFY_DONE; 358 return NOTIFY_DONE;
347 359
360 intel_runtime_pm_get(dev_priv);
348 freed_pages = i915_gem_shrink_all(dev_priv); 361 freed_pages = i915_gem_shrink_all(dev_priv);
362 intel_runtime_pm_put(dev_priv);
349 363
350 /* Because we may be allocating inside our own driver, we cannot 364 /* Because we may be allocating inside our own driver, we cannot
351 * assert that there are no objects with pinned pages that are not 365 * assert that there are no objects with pinned pages that are not
@@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
386 struct drm_i915_private *dev_priv = 400 struct drm_i915_private *dev_priv =
387 container_of(nb, struct drm_i915_private, mm.vmap_notifier); 401 container_of(nb, struct drm_i915_private, mm.vmap_notifier);
388 struct shrinker_lock_uninterruptible slu; 402 struct shrinker_lock_uninterruptible slu;
389 unsigned long freed_pages; 403 struct i915_vma *vma, *next;
404 unsigned long freed_pages = 0;
405 int ret;
390 406
391 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) 407 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
392 return NOTIFY_DONE; 408 return NOTIFY_DONE;
393 409
394 freed_pages = i915_gem_shrink(dev_priv, -1UL, 410 /* Force everything onto the inactive lists */
395 I915_SHRINK_BOUND | 411 ret = i915_gpu_idle(dev_priv->dev);
396 I915_SHRINK_UNBOUND | 412 if (ret)
397 I915_SHRINK_ACTIVE | 413 goto out;
398 I915_SHRINK_VMAPS); 414
415 intel_runtime_pm_get(dev_priv);
416 freed_pages += i915_gem_shrink(dev_priv, -1UL,
417 I915_SHRINK_BOUND |
418 I915_SHRINK_UNBOUND |
419 I915_SHRINK_ACTIVE |
420 I915_SHRINK_VMAPS);
421 intel_runtime_pm_put(dev_priv);
422
423 /* We also want to clear any cached iomaps as they wrap vmap */
424 list_for_each_entry_safe(vma, next,
425 &dev_priv->ggtt.base.inactive_list, vm_link) {
426 unsigned long count = vma->node.size >> PAGE_SHIFT;
427 if (vma->iomap && i915_vma_unbind(vma) == 0)
428 freed_pages += count;
429 }
399 430
431out:
400 i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); 432 i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
401 433
402 *(unsigned long *)ptr += freed_pages; 434 *(unsigned long *)ptr += freed_pages;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b7ce963fb8f8..f9253f2b7ba0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -56,7 +56,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
56 56
57 /* See the comment at the drm_mm_init() call for more about this check. 57 /* See the comment at the drm_mm_init() call for more about this check.
58 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ 58 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
59 if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) 59 if (IS_GEN8(dev_priv) && start < 4096)
60 start = 4096; 60 start = 4096;
61 61
62 mutex_lock(&dev_priv->mm.stolen_lock); 62 mutex_lock(&dev_priv->mm.stolen_lock);
@@ -109,9 +109,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
109 if (INTEL_INFO(dev)->gen >= 3) { 109 if (INTEL_INFO(dev)->gen >= 3) {
110 u32 bsm; 110 u32 bsm;
111 111
112 pci_read_config_dword(dev->pdev, BSM, &bsm); 112 pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
113 113
114 base = bsm & BSM_MASK; 114 base = bsm & INTEL_BSM_MASK;
115 } else if (IS_I865G(dev)) { 115 } else if (IS_I865G(dev)) {
116 u16 toud = 0; 116 u16 toud = 0;
117 117
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b9bdb34032cd..a6eb5c47a49c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
125 if (INTEL_INFO(obj->base.dev)->gen >= 4) 125 if (INTEL_INFO(obj->base.dev)->gen >= 4)
126 return true; 126 return true;
127 127
128 if (INTEL_INFO(obj->base.dev)->gen == 3) { 128 if (IS_GEN3(obj->base.dev)) {
129 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) 129 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
130 return false; 130 return false;
131 } else { 131 } else {
@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
229 */ 229 */
230 if (obj->map_and_fenceable && 230 if (obj->map_and_fenceable &&
231 !i915_gem_object_fence_ok(obj, args->tiling_mode)) 231 !i915_gem_object_fence_ok(obj, args->tiling_mode))
232 ret = i915_gem_object_ggtt_unbind(obj); 232 ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
233 233
234 if (ret == 0) { 234 if (ret == 0) {
235 if (obj->pages && 235 if (obj->pages &&
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32d9726e38b1..2314c88323e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
706static void 706static void
707i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) 707i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
708{ 708{
709 struct sg_page_iter sg_iter; 709 struct sgt_iter sgt_iter;
710 struct page *page;
710 711
711 BUG_ON(obj->userptr.work != NULL); 712 BUG_ON(obj->userptr.work != NULL);
712 __i915_gem_userptr_set_active(obj, false); 713 __i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
716 717
717 i915_gem_gtt_finish_object(obj); 718 i915_gem_gtt_finish_object(obj);
718 719
719 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 720 for_each_sgt_page(page, sgt_iter, obj->pages) {
720 struct page *page = sg_page_iter_page(&sg_iter);
721
722 if (obj->dirty) 721 if (obj->dirty)
723 set_page_dirty(page); 722 set_page_dirty(page);
724 723
@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
855 return 0; 854 return 0;
856} 855}
857 856
858int 857void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
859i915_gem_init_userptr(struct drm_device *dev)
860{ 858{
861 struct drm_i915_private *dev_priv = to_i915(dev);
862 mutex_init(&dev_priv->mm_lock); 859 mutex_init(&dev_priv->mm_lock);
863 hash_init(dev_priv->mm_structs); 860 hash_init(dev_priv->mm_structs);
864 return 0;
865} 861}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89725c9efc25..34ff2459ceea 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
411 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 411 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
412 } 412 }
413 413
414 if (INTEL_INFO(dev)->gen == 7) 414 if (IS_GEN7(dev))
415 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 415 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
416 416
417 for (i = 0; i < ARRAY_SIZE(error->ring); i++) 417 for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
824 return error_code; 824 return error_code;
825} 825}
826 826
827static void i915_gem_record_fences(struct drm_device *dev, 827static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
828 struct drm_i915_error_state *error) 828 struct drm_i915_error_state *error)
829{ 829{
830 struct drm_i915_private *dev_priv = dev->dev_private;
831 int i; 830 int i;
832 831
833 if (IS_GEN3(dev) || IS_GEN2(dev)) { 832 if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
834 for (i = 0; i < dev_priv->num_fence_regs; i++) 833 for (i = 0; i < dev_priv->num_fence_regs; i++)
835 error->fence[i] = I915_READ(FENCE_REG(i)); 834 error->fence[i] = I915_READ(FENCE_REG(i));
836 } else if (IS_GEN5(dev) || IS_GEN4(dev)) { 835 } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
837 for (i = 0; i < dev_priv->num_fence_regs; i++) 836 for (i = 0; i < dev_priv->num_fence_regs; i++)
838 error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); 837 error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
839 } else if (INTEL_INFO(dev)->gen >= 6) { 838 } else if (INTEL_GEN(dev_priv) >= 6) {
840 for (i = 0; i < dev_priv->num_fence_regs; i++) 839 for (i = 0; i < dev_priv->num_fence_regs; i++)
841 error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); 840 error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
842 } 841 }
@@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
851 struct intel_engine_cs *to; 850 struct intel_engine_cs *to;
852 enum intel_engine_id id; 851 enum intel_engine_id id;
853 852
854 if (!i915_semaphore_is_enabled(dev_priv->dev)) 853 if (!i915_semaphore_is_enabled(dev_priv))
855 return; 854 return;
856 855
857 if (!error->semaphore_obj) 856 if (!error->semaphore_obj)
@@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
893 } 892 }
894} 893}
895 894
896static void i915_record_ring_state(struct drm_device *dev, 895static void i915_record_ring_state(struct drm_i915_private *dev_priv,
897 struct drm_i915_error_state *error, 896 struct drm_i915_error_state *error,
898 struct intel_engine_cs *engine, 897 struct intel_engine_cs *engine,
899 struct drm_i915_error_ring *ering) 898 struct drm_i915_error_ring *ering)
900{ 899{
901 struct drm_i915_private *dev_priv = dev->dev_private; 900 if (INTEL_GEN(dev_priv) >= 6) {
902
903 if (INTEL_INFO(dev)->gen >= 6) {
904 ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); 901 ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
905 ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); 902 ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
906 if (INTEL_INFO(dev)->gen >= 8) 903 if (INTEL_GEN(dev_priv) >= 8)
907 gen8_record_semaphore_state(dev_priv, error, engine, 904 gen8_record_semaphore_state(dev_priv, error, engine,
908 ering); 905 ering);
909 else 906 else
910 gen6_record_semaphore_state(dev_priv, engine, ering); 907 gen6_record_semaphore_state(dev_priv, engine, ering);
911 } 908 }
912 909
913 if (INTEL_INFO(dev)->gen >= 4) { 910 if (INTEL_GEN(dev_priv) >= 4) {
914 ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); 911 ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
915 ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); 912 ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
916 ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 913 ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
917 ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); 914 ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
918 ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); 915 ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
919 ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); 916 ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
920 if (INTEL_INFO(dev)->gen >= 8) { 917 if (INTEL_GEN(dev_priv) >= 8) {
921 ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; 918 ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
922 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; 919 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
923 } 920 }
@@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev,
939 ering->tail = I915_READ_TAIL(engine); 936 ering->tail = I915_READ_TAIL(engine);
940 ering->ctl = I915_READ_CTL(engine); 937 ering->ctl = I915_READ_CTL(engine);
941 938
942 if (I915_NEED_GFX_HWS(dev)) { 939 if (I915_NEED_GFX_HWS(dev_priv)) {
943 i915_reg_t mmio; 940 i915_reg_t mmio;
944 941
945 if (IS_GEN7(dev)) { 942 if (IS_GEN7(dev_priv)) {
946 switch (engine->id) { 943 switch (engine->id) {
947 default: 944 default:
948 case RCS: 945 case RCS:
@@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev,
958 mmio = VEBOX_HWS_PGA_GEN7; 955 mmio = VEBOX_HWS_PGA_GEN7;
959 break; 956 break;
960 } 957 }
961 } else if (IS_GEN6(engine->dev)) { 958 } else if (IS_GEN6(engine->i915)) {
962 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 959 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
963 } else { 960 } else {
964 /* XXX: gen8 returns to sanity */ 961 /* XXX: gen8 returns to sanity */
@@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev,
971 ering->hangcheck_score = engine->hangcheck.score; 968 ering->hangcheck_score = engine->hangcheck.score;
972 ering->hangcheck_action = engine->hangcheck.action; 969 ering->hangcheck_action = engine->hangcheck.action;
973 970
974 if (USES_PPGTT(dev)) { 971 if (USES_PPGTT(dev_priv)) {
975 int i; 972 int i;
976 973
977 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); 974 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
978 975
979 if (IS_GEN6(dev)) 976 if (IS_GEN6(dev_priv))
980 ering->vm_info.pp_dir_base = 977 ering->vm_info.pp_dir_base =
981 I915_READ(RING_PP_DIR_BASE_READ(engine)); 978 I915_READ(RING_PP_DIR_BASE_READ(engine));
982 else if (IS_GEN7(dev)) 979 else if (IS_GEN7(dev_priv))
983 ering->vm_info.pp_dir_base = 980 ering->vm_info.pp_dir_base =
984 I915_READ(RING_PP_DIR_BASE(engine)); 981 I915_READ(RING_PP_DIR_BASE(engine));
985 else if (INTEL_INFO(dev)->gen >= 8) 982 else if (INTEL_GEN(dev_priv) >= 8)
986 for (i = 0; i < 4; i++) { 983 for (i = 0; i < 4; i++) {
987 ering->vm_info.pdp[i] = 984 ering->vm_info.pdp[i] =
988 I915_READ(GEN8_RING_PDP_UDW(engine, i)); 985 I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
998 struct drm_i915_error_state *error, 995 struct drm_i915_error_state *error,
999 struct drm_i915_error_ring *ering) 996 struct drm_i915_error_ring *ering)
1000{ 997{
1001 struct drm_i915_private *dev_priv = engine->dev->dev_private; 998 struct drm_i915_private *dev_priv = engine->i915;
1002 struct drm_i915_gem_object *obj; 999 struct drm_i915_gem_object *obj;
1003 1000
1004 /* Currently render ring is the only HW context user */ 1001 /* Currently render ring is the only HW context user */
@@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
1016 } 1013 }
1017} 1014}
1018 1015
1019static void i915_gem_record_rings(struct drm_device *dev, 1016static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
1020 struct drm_i915_error_state *error) 1017 struct drm_i915_error_state *error)
1021{ 1018{
1022 struct drm_i915_private *dev_priv = to_i915(dev);
1023 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1019 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1024 struct drm_i915_gem_request *request; 1020 struct drm_i915_gem_request *request;
1025 int i, count; 1021 int i, count;
@@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
1030 1026
1031 error->ring[i].pid = -1; 1027 error->ring[i].pid = -1;
1032 1028
1033 if (engine->dev == NULL) 1029 if (!intel_engine_initialized(engine))
1034 continue; 1030 continue;
1035 1031
1036 error->ring[i].valid = true; 1032 error->ring[i].valid = true;
1037 1033
1038 i915_record_ring_state(dev, error, engine, &error->ring[i]); 1034 i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
1039 1035
1040 request = i915_gem_find_active_request(engine); 1036 request = i915_gem_find_active_request(engine);
1041 if (request) { 1037 if (request) {
@@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1301 error->eir = I915_READ(EIR); 1297 error->eir = I915_READ(EIR);
1302 error->pgtbl_er = I915_READ(PGTBL_ER); 1298 error->pgtbl_er = I915_READ(PGTBL_ER);
1303 1299
1304 i915_get_extra_instdone(dev, error->extra_instdone); 1300 i915_get_extra_instdone(dev_priv, error->extra_instdone);
1305} 1301}
1306 1302
1307static void i915_error_capture_msg(struct drm_device *dev, 1303static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
1308 struct drm_i915_error_state *error, 1304 struct drm_i915_error_state *error,
1309 u32 engine_mask, 1305 u32 engine_mask,
1310 const char *error_msg) 1306 const char *error_msg)
1311{ 1307{
1312 struct drm_i915_private *dev_priv = dev->dev_private;
1313 u32 ecode; 1308 u32 ecode;
1314 int ring_id = -1, len; 1309 int ring_id = -1, len;
1315 1310
@@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
1317 1312
1318 len = scnprintf(error->error_msg, sizeof(error->error_msg), 1313 len = scnprintf(error->error_msg, sizeof(error->error_msg),
1319 "GPU HANG: ecode %d:%d:0x%08x", 1314 "GPU HANG: ecode %d:%d:0x%08x",
1320 INTEL_INFO(dev)->gen, ring_id, ecode); 1315 INTEL_GEN(dev_priv), ring_id, ecode);
1321 1316
1322 if (ring_id != -1 && error->ring[ring_id].pid != -1) 1317 if (ring_id != -1 && error->ring[ring_id].pid != -1)
1323 len += scnprintf(error->error_msg + len, 1318 len += scnprintf(error->error_msg + len,
@@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
1352 * out a structure which becomes available in debugfs for user level tools 1347 * out a structure which becomes available in debugfs for user level tools
1353 * to pick up. 1348 * to pick up.
1354 */ 1349 */
1355void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, 1350void i915_capture_error_state(struct drm_i915_private *dev_priv,
1351 u32 engine_mask,
1356 const char *error_msg) 1352 const char *error_msg)
1357{ 1353{
1358 static bool warned; 1354 static bool warned;
1359 struct drm_i915_private *dev_priv = dev->dev_private;
1360 struct drm_i915_error_state *error; 1355 struct drm_i915_error_state *error;
1361 unsigned long flags; 1356 unsigned long flags;
1362 1357
@@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
1372 i915_capture_gen_state(dev_priv, error); 1367 i915_capture_gen_state(dev_priv, error);
1373 i915_capture_reg_state(dev_priv, error); 1368 i915_capture_reg_state(dev_priv, error);
1374 i915_gem_capture_buffers(dev_priv, error); 1369 i915_gem_capture_buffers(dev_priv, error);
1375 i915_gem_record_fences(dev, error); 1370 i915_gem_record_fences(dev_priv, error);
1376 i915_gem_record_rings(dev, error); 1371 i915_gem_record_rings(dev_priv, error);
1377 1372
1378 do_gettimeofday(&error->time); 1373 do_gettimeofday(&error->time);
1379 1374
1380 error->overlay = intel_overlay_capture_error_state(dev); 1375 error->overlay = intel_overlay_capture_error_state(dev_priv);
1381 error->display = intel_display_capture_error_state(dev); 1376 error->display = intel_display_capture_error_state(dev_priv);
1382 1377
1383 i915_error_capture_msg(dev, error, engine_mask, error_msg); 1378 i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
1384 DRM_INFO("%s\n", error->error_msg); 1379 DRM_INFO("%s\n", error->error_msg);
1385 1380
1386 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1381 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
@@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
1400 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); 1395 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1401 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 1396 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1402 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); 1397 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
1403 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index); 1398 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
1404 warned = true; 1399 warned = true;
1405 } 1400 }
1406} 1401}
@@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
1450} 1445}
1451 1446
1452/* NB: please notice the memset */ 1447/* NB: please notice the memset */
1453void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) 1448void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
1449 uint32_t *instdone)
1454{ 1450{
1455 struct drm_i915_private *dev_priv = dev->dev_private;
1456 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 1451 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1457 1452
1458 if (IS_GEN2(dev) || IS_GEN3(dev)) 1453 if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
1459 instdone[0] = I915_READ(GEN2_INSTDONE); 1454 instdone[0] = I915_READ(GEN2_INSTDONE);
1460 else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { 1455 else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
1461 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); 1456 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
1462 instdone[1] = I915_READ(GEN4_INSTDONE1); 1457 instdone[1] = I915_READ(GEN4_INSTDONE1);
1463 } else if (INTEL_INFO(dev)->gen >= 7) { 1458 } else if (INTEL_GEN(dev_priv) >= 7) {
1464 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); 1459 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
1465 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1460 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1466 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1461 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
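
The conversion pattern running through this file, shown with lines from the hunks above; the IS_GENx()/INTEL_GEN() macros are taken to accept either a drm_device or drm_i915_private pointer, as the mixed usage here suggests:

	/* before */
	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	/* after */
	if (IS_GEN7(dev))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	/* and for open-coded comparisons */
	if (INTEL_GEN(dev_priv) >= 6)
		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
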
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 80786d9f9ad3..cf5a65be4fe0 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -67,11 +67,11 @@
67#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ 67#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
68#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) 68#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
69 69
70/* Defines WOPCM space available to GuC firmware */
70#define GUC_WOPCM_SIZE _MMIO(0xc050) 71#define GUC_WOPCM_SIZE _MMIO(0xc050)
71#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
72
73/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ 72/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
74#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) 73#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
74#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
75 75
76#define GEN8_GT_PM_CONFIG _MMIO(0x138140) 76#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
77#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) 77#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d40c13fb6643..ac72451c571c 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
158 158
159 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; 159 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
160 /* WaRsDisableCoarsePowerGating:skl,bxt */ 160 /* WaRsDisableCoarsePowerGating:skl,bxt */
161 if (!intel_enable_rc6(dev) || 161 if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
162 NEEDS_WaRsDisableCoarsePowerGating(dev))
163 data[1] = 0; 162 data[1] = 0;
164 else 163 else
165 /* bit 0 and 1 are for Render and Media domain separately */ 164 /* bit 0 and 1 are for Render and Media domain separately */
@@ -361,10 +360,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
361 struct drm_i915_gem_object *client_obj = client->client_obj; 360 struct drm_i915_gem_object *client_obj = client->client_obj;
362 struct drm_i915_private *dev_priv = guc_to_i915(guc); 361 struct drm_i915_private *dev_priv = guc_to_i915(guc);
363 struct intel_engine_cs *engine; 362 struct intel_engine_cs *engine;
364 struct intel_context *ctx = client->owner; 363 struct i915_gem_context *ctx = client->owner;
365 struct guc_context_desc desc; 364 struct guc_context_desc desc;
366 struct sg_table *sg; 365 struct sg_table *sg;
367 enum intel_engine_id id;
368 u32 gfx_addr; 366 u32 gfx_addr;
369 367
370 memset(&desc, 0, sizeof(desc)); 368 memset(&desc, 0, sizeof(desc));
@@ -374,10 +372,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
374 desc.priority = client->priority; 372 desc.priority = client->priority;
375 desc.db_id = client->doorbell_id; 373 desc.db_id = client->doorbell_id;
376 374
377 for_each_engine_id(engine, dev_priv, id) { 375 for_each_engine(engine, dev_priv) {
376 struct intel_context *ce = &ctx->engine[engine->id];
378 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; 377 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
379 struct drm_i915_gem_object *obj; 378 struct drm_i915_gem_object *obj;
380 uint64_t ctx_desc;
381 379
382 /* TODO: We have a design issue to be solved here. Only when we 380 /* TODO: We have a design issue to be solved here. Only when we
383 * receive the first batch, we know which engine is used by the 381 * receive the first batch, we know which engine is used by the
@@ -386,20 +384,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
386 * for now who owns a GuC client. But for future owner of GuC 384 * for now who owns a GuC client. But for future owner of GuC
387 * client, need to make sure lrc is pinned prior to enter here. 385 * client, need to make sure lrc is pinned prior to enter here.
388 */ 386 */
389 obj = ctx->engine[id].state; 387 if (!ce->state)
390 if (!obj)
391 break; /* XXX: continue? */ 388 break; /* XXX: continue? */
392 389
393 ctx_desc = intel_lr_context_descriptor(ctx, engine); 390 lrc->context_desc = lower_32_bits(ce->lrc_desc);
394 lrc->context_desc = (u32)ctx_desc;
395 391
396 /* The state page is after PPHWSP */ 392 /* The state page is after PPHWSP */
397 gfx_addr = i915_gem_obj_ggtt_offset(obj); 393 gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
398 lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; 394 lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
399 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | 395 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
400 (engine->guc_id << GUC_ELC_ENGINE_OFFSET); 396 (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
401 397
402 obj = ctx->engine[id].ringbuf->obj; 398 obj = ce->ringbuf->obj;
403 gfx_addr = i915_gem_obj_ggtt_offset(obj); 399 gfx_addr = i915_gem_obj_ggtt_offset(obj);
404 400
405 lrc->ring_begin = gfx_addr; 401 lrc->ring_begin = gfx_addr;
@@ -427,7 +423,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
427 desc.wq_size = client->wq_size; 423 desc.wq_size = client->wq_size;
428 424
429 /* 425 /*
430 * XXX: Take LRCs from an existing intel_context if this is not an 426 * XXX: Take LRCs from an existing context if this is not an
431 * IsKMDCreatedContext client 427 * IsKMDCreatedContext client
432 */ 428 */
433 desc.desc_private = (uintptr_t)client; 429 desc.desc_private = (uintptr_t)client;
@@ -451,47 +447,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
451 sizeof(desc) * client->ctx_index); 447 sizeof(desc) * client->ctx_index);
452} 448}
453 449
454int i915_guc_wq_check_space(struct i915_guc_client *gc) 450/**
451 * i915_guc_wq_check_space() - check that the GuC can accept a request
452 * @request: request associated with the commands
453 *
454 * Return: 0 if space is available
455 * -EAGAIN if space is not currently available
456 *
457 * This function must be called (and must return 0) before a request
458 * is submitted to the GuC via i915_guc_submit() below. Once a result
459 * of 0 has been returned, it remains valid until (but only until)
460 * the next call to submit().
461 *
462 * This precheck allows the caller to determine in advance that space
463 * will be available for the next submission before committing resources
464 * to it, and helps avoid late failures with complicated recovery paths.
465 */
466int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
455{ 467{
468 const size_t wqi_size = sizeof(struct guc_wq_item);
469 struct i915_guc_client *gc = request->i915->guc.execbuf_client;
456 struct guc_process_desc *desc; 470 struct guc_process_desc *desc;
457 u32 size = sizeof(struct guc_wq_item); 471 u32 freespace;
458 int ret = -ETIMEDOUT, timeout_counter = 200;
459 472
460 if (!gc) 473 GEM_BUG_ON(gc == NULL);
461 return 0;
462 474
463 desc = gc->client_base + gc->proc_desc_offset; 475 desc = gc->client_base + gc->proc_desc_offset;
464 476
465 while (timeout_counter-- > 0) { 477 freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
466 if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { 478 if (likely(freespace >= wqi_size))
467 ret = 0; 479 return 0;
468 break;
469 }
470 480
471 if (timeout_counter) 481 gc->no_wq_space += 1;
472 usleep_range(1000, 2000);
473 };
474 482
475 return ret; 483 return -EAGAIN;
476} 484}
477 485
478static int guc_add_workqueue_item(struct i915_guc_client *gc, 486static void guc_add_workqueue_item(struct i915_guc_client *gc,
479 struct drm_i915_gem_request *rq) 487 struct drm_i915_gem_request *rq)
480{ 488{
489 /* wqi_len is in DWords, and does not include the one-word header */
490 const size_t wqi_size = sizeof(struct guc_wq_item);
491 const u32 wqi_len = wqi_size/sizeof(u32) - 1;
481 struct guc_process_desc *desc; 492 struct guc_process_desc *desc;
482 struct guc_wq_item *wqi; 493 struct guc_wq_item *wqi;
483 void *base; 494 void *base;
484 u32 tail, wq_len, wq_off, space; 495 u32 freespace, tail, wq_off, wq_page;
485 496
486 desc = gc->client_base + gc->proc_desc_offset; 497 desc = gc->client_base + gc->proc_desc_offset;
487 space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
488 if (WARN_ON(space < sizeof(struct guc_wq_item)))
489 return -ENOSPC; /* shouldn't happen */
490 498
491 /* postincrement WQ tail for next time */ 499 /* Free space is guaranteed, see i915_guc_wq_check_space() above */
492 wq_off = gc->wq_tail; 500 freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
493 gc->wq_tail += sizeof(struct guc_wq_item); 501 GEM_BUG_ON(freespace < wqi_size);
494 gc->wq_tail &= gc->wq_size - 1; 502
503 /* The GuC firmware wants the tail index in QWords, not bytes */
504 tail = rq->tail;
505 GEM_BUG_ON(tail & 7);
506 tail >>= 3;
507 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
495 508
496 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we 509 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
497 * should not have the case where structure wqi is across page, neither 510 * should not have the case where structure wqi is across page, neither
@@ -500,19 +513,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
500 * XXX: if not the case, we need save data to a temp wqi and copy it to 513 * XXX: if not the case, we need save data to a temp wqi and copy it to
501 * workqueue buffer dw by dw. 514 * workqueue buffer dw by dw.
502 */ 515 */
503 WARN_ON(sizeof(struct guc_wq_item) != 16); 516 BUILD_BUG_ON(wqi_size != 16);
504 WARN_ON(wq_off & 3);
505 517
506 /* wq starts from the page after doorbell / process_desc */ 518 /* postincrement WQ tail for next time */
507 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 519 wq_off = gc->wq_tail;
508 (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT)); 520 gc->wq_tail += wqi_size;
521 gc->wq_tail &= gc->wq_size - 1;
522 GEM_BUG_ON(wq_off & (wqi_size - 1));
523
524 /* WQ starts from the page after doorbell / process_desc */
525 wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
509 wq_off &= PAGE_SIZE - 1; 526 wq_off &= PAGE_SIZE - 1;
527 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
510 wqi = (struct guc_wq_item *)((char *)base + wq_off); 528 wqi = (struct guc_wq_item *)((char *)base + wq_off);
511 529
512 /* len does not include the header */ 530 /* Now fill in the 4-word work queue item */
513 wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
514 wqi->header = WQ_TYPE_INORDER | 531 wqi->header = WQ_TYPE_INORDER |
515 (wq_len << WQ_LEN_SHIFT) | 532 (wqi_len << WQ_LEN_SHIFT) |
516 (rq->engine->guc_id << WQ_TARGET_SHIFT) | 533 (rq->engine->guc_id << WQ_TARGET_SHIFT) |
517 WQ_NO_WCFLUSH_WAIT; 534 WQ_NO_WCFLUSH_WAIT;
518 535
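
A quick arithmetic check on the tail conversion above: the ring tail is a byte offset, commands are emitted in qword multiples, and the GuC wants the index in qwords.

	u32 tail = rq->tail;		/* byte offset, e.g. 0x130 */

	GEM_BUG_ON(tail & 7);		/* must be qword aligned */
	tail >>= 3;			/* 0x130 bytes -> 0x26 qwords */
	GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
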
@@ -520,48 +537,50 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
520 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, 537 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
521 rq->engine); 538 rq->engine);
522 539
523 /* The GuC firmware wants the tail index in QWords, not bytes */
524 tail = rq->ringbuf->tail >> 3;
525 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; 540 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
526 wqi->fence_id = 0; /*XXX: what fence to be here */ 541 wqi->fence_id = rq->seqno;
527 542
528 kunmap_atomic(base); 543 kunmap_atomic(base);
529
530 return 0;
531} 544}
532 545
533/** 546/**
534 * i915_guc_submit() - Submit commands through GuC 547 * i915_guc_submit() - Submit commands through GuC
535 * @client: the guc client where commands will go through
536 * @rq: request associated with the commands 548 * @rq: request associated with the commands
537 * 549 *
538 * Return: 0 if succeed 550 * Return: 0 on success, otherwise an errno.
551 * (Note: nonzero really shouldn't happen!)
552 *
553 * The caller must have already called i915_guc_wq_check_space() above
554 * with a result of 0 (success) since the last request submission. This
555 * guarantees that there is space in the work queue for the new request,
556 * so enqueuing the item cannot fail.
557 *
 558 * Bad Things Will Happen if the caller violates this protocol, e.g. calls
 559 * submit() when check() says there's no space, or calls submit() multiple
 560 * times with no intervening check().
 561 *
 562 * The only error here arises if the doorbell hardware isn't functioning
 563 * as expected, which really shouldn't happen.
539 */ 564 */
540int i915_guc_submit(struct i915_guc_client *client, 565int i915_guc_submit(struct drm_i915_gem_request *rq)
541 struct drm_i915_gem_request *rq)
542{ 566{
543 struct intel_guc *guc = client->guc;
544 unsigned int engine_id = rq->engine->guc_id; 567 unsigned int engine_id = rq->engine->guc_id;
545 int q_ret, b_ret; 568 struct intel_guc *guc = &rq->i915->guc;
569 struct i915_guc_client *client = guc->execbuf_client;
570 int b_ret;
546 571
547 q_ret = guc_add_workqueue_item(client, rq); 572 guc_add_workqueue_item(client, rq);
548 if (q_ret == 0) 573 b_ret = guc_ring_doorbell(client);
549 b_ret = guc_ring_doorbell(client);
550 574
551 client->submissions[engine_id] += 1; 575 client->submissions[engine_id] += 1;
552 if (q_ret) { 576 client->retcode = b_ret;
553 client->q_fail += 1; 577 if (b_ret)
554 client->retcode = q_ret;
555 } else if (b_ret) {
556 client->b_fail += 1; 578 client->b_fail += 1;
557 client->retcode = q_ret = b_ret; 579
558 } else {
559 client->retcode = 0;
560 }
561 guc->submissions[engine_id] += 1; 580 guc->submissions[engine_id] += 1;
562 guc->last_seqno[engine_id] = rq->seqno; 581 guc->last_seqno[engine_id] = rq->seqno;
563 582
564 return q_ret; 583 return b_ret;
565} 584}
566 585
567/* 586/*
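A minimal sketch of the check-then-submit protocol that the new kernel-doc
above describes; the wrapper function is hypothetical, and the exact
i915_guc_wq_check_space() signature is assumed rather than taken from this
patch:

	/* Hypothetical caller: exactly one submit() per successful check(). */
	static int submit_one_request(struct drm_i915_gem_request *rq)
	{
		struct i915_guc_client *client = rq->i915->guc.execbuf_client;
		int ret;

		ret = i915_guc_wq_check_space(client);	/* assumed signature */
		if (ret)
			return ret;	/* no WQ space yet: must not call submit() */

		return i915_guc_submit(rq);	/* cannot fail for lack of space */
	}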
@@ -587,8 +606,8 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
-	obj = i915_gem_alloc_object(dev, size);
-	if (!obj)
+	obj = i915_gem_object_create(dev, size);
+	if (IS_ERR(obj))
 		return NULL;
 
 	if (i915_gem_object_get_pages(obj)) {
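The hunk above switches from a NULL-returning allocator to one that returns
ERR_PTR(-errno); a short sketch of that kernel idiom, with a hypothetical
helper name (only i915_gem_object_create() and IS_ERR() appear in the patch):

	#include <linux/err.h>

	static struct drm_i915_gem_object *
	create_or_null(struct drm_device *dev, u64 size)
	{
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create(dev, size);
		if (IS_ERR(obj))	/* failure is ERR_PTR(-errno), never NULL */
			return NULL;	/* PTR_ERR(obj) would recover the errno */

		return obj;
	}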
@@ -678,7 +697,7 @@ static void guc_client_free(struct drm_device *dev,
  */
 static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 						uint32_t priority,
-						struct intel_context *ctx)
+						struct i915_gem_context *ctx)
 {
 	struct i915_guc_client *client;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -916,11 +935,12 @@ int i915_guc_submission_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct intel_context *ctx = dev_priv->kernel_context;
 	struct i915_guc_client *client;
 
 	/* client for execbuf submission */
-	client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
+	client = guc_client_alloc(dev,
+				  GUC_CTX_PRIORITY_KMD_NORMAL,
+				  dev_priv->kernel_context);
 	if (!client) {
 		DRM_ERROR("Failed to create execbuf guc_client\n");
 		return -ENOMEM;
@@ -967,10 +987,10 @@ int intel_guc_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	u32 data[3];
 
-	if (!i915.enable_guc_submission)
+	if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
 		return 0;
 
 	ctx = dev_priv->kernel_context;
@@ -993,10 +1013,10 @@ int intel_guc_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	u32 data[3];
 
-	if (!i915.enable_guc_submission)
+	if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
 		return 0;
 
 	ctx = dev_priv->kernel_context;
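Both suspend and resume now gate on the firmware's actual load status rather
than the i915.enable_guc_submission module parameter; a sketch of the idea,
with a hypothetical helper name:

	/* The module parameter can be set even though the GuC never loaded. */
	static bool guc_is_usable(const struct intel_guc *guc)
	{
		return guc->guc_fw.guc_fw_load_status == GUC_FIRMWARE_SUCCESS;
	}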
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2f6fd33c07ba..5c7378374ae6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 	__gen6_disable_pm_irq(dev_priv, mask);
 }
 
-void gen6_reset_rps_interrupts(struct drm_device *dev)
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	i915_reg_t reg = gen6_pm_iir(dev_priv);
 
 	spin_lock_irq(&dev_priv->irq_lock);
@@ -349,10 +348,8 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-void gen6_enable_rps_interrupts(struct drm_device *dev)
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 
 	WARN_ON(dev_priv->rps.pm_iir);
@@ -367,25 +364,11 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
 
 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
 {
-	/*
-	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
-	 * if GEN6_PM_UP_EI_EXPIRED is masked.
-	 *
-	 * TODO: verify if this can be reproduced on VLV,CHV.
-	 */
-	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
-		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
-
-	if (INTEL_INFO(dev_priv)->gen >= 8)
-		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
-	return mask;
+	return (mask & ~dev_priv->rps.pm_intr_keep);
 }
 
-void gen6_disable_rps_interrupts(struct drm_device *dev)
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.interrupts_enabled = false;
 	spin_unlock_irq(&dev_priv->irq_lock);
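The per-call, per-platform conditionals removed above are folded into a
single mask, rps.pm_intr_keep, computed once at init; roughly as follows (a
sketch under that assumption, not the patch's actual init code):

	static u32 compute_pm_intr_keep(struct drm_i915_private *dev_priv)
	{
		u32 keep = 0;

		/* SNB/IVB may hard hang if GEN6_PM_RP_UP_EI_EXPIRED is masked */
		if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
			keep |= GEN6_PM_RP_UP_EI_EXPIRED;

		if (INTEL_INFO(dev_priv)->gen >= 8)
			keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

		return keep;	/* sanitize then reduces to: mask & ~keep */
	}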
@@ -402,7 +385,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
 
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	synchronize_irq(dev->irq);
+	synchronize_irq(dev_priv->dev->irq);
 }
 
 /**
@@ -607,17 +590,15 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
  * @dev: drm device
  */
-static void i915_enable_asle_pipestat(struct drm_device *dev)
+static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
+	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
 		return;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 
 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (INTEL_GEN(dev_priv) >= 4)
 		i915_enable_pipestat(dev_priv, PIPE_A,
 				     PIPE_LEGACY_BLC_EVENT_STATUS);
 
@@ -750,7 +731,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		vtotal /= 2;
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
 	else
 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -767,7 +748,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 	 * problem. We may need to extend this to include other platforms,
 	 * but so far testing only shows the problem on HSW.
 	 */
-	if (HAS_DDI(dev) && !position) {
+	if (HAS_DDI(dev_priv) && !position) {
 		int i, temp;
 
 		for (i = 0; i < 100; i++) {
@@ -835,7 +816,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	if (stime)
 		*stime = ktime_get();
 
-	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
@@ -897,7 +878,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	else
 		position += vtotal - vbl_end;
 
-	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
 		*vpos = position;
 		*hpos = 0;
 	} else {
@@ -955,9 +936,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
 						     &crtc->hwmode);
 }
 
-static void ironlake_rps_change_irq_handler(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
 	u8 new_delay;
 
@@ -986,7 +966,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 		new_delay = dev_priv->ips.min_delay;
 	}
 
-	if (ironlake_set_drps(dev, new_delay))
+	if (ironlake_set_drps(dev_priv, new_delay))
 		dev_priv->ips.cur_delay = new_delay;
 
 	spin_unlock(&mchdev_lock);
@@ -1175,7 +1155,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	new_delay += adj;
 	new_delay = clamp_t(int, new_delay, min, max);
 
-	intel_set_rps(dev_priv->dev, new_delay);
+	intel_set_rps(dev_priv, new_delay);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 out:
@@ -1506,27 +1486,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
 
 }
 
-static void gmbus_irq_handler(struct drm_device *dev)
+static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-static void dp_aux_irq_handler(struct drm_device *dev)
+static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
 #if defined(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+					 enum pipe pipe,
 					 uint32_t crc0, uint32_t crc1,
 					 uint32_t crc2, uint32_t crc3,
 					 uint32_t crc4)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
 	struct intel_pipe_crc_entry *entry;
 	int head, tail;
@@ -1550,7 +1526,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
 
 	entry = &pipe_crc->entries[head];
 
-	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+	entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
+								 pipe);
 	entry->crc[0] = crc0;
 	entry->crc[1] = crc1;
 	entry->crc[2] = crc2;
@@ -1566,27 +1543,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
 }
 #else
 static inline void
-display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+			     enum pipe pipe,
 			     uint32_t crc0, uint32_t crc1,
 			     uint32_t crc2, uint32_t crc3,
 			     uint32_t crc4) {}
 #endif
 
 
-static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+				     enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	display_pipe_crc_irq_handler(dev, pipe,
+	display_pipe_crc_irq_handler(dev_priv, pipe,
 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
 				     0, 0, 0, 0);
 }
 
-static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+				     enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	display_pipe_crc_irq_handler(dev, pipe,
+	display_pipe_crc_irq_handler(dev_priv, pipe,
 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
@@ -1594,22 +1570,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
 }
 
-static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t res1, res2;
 
-	if (INTEL_INFO(dev)->gen >= 3)
+	if (INTEL_GEN(dev_priv) >= 3)
 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
 	else
 		res1 = 0;
 
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
 	else
 		res2 = 0;
 
-	display_pipe_crc_irq_handler(dev, pipe,
+	display_pipe_crc_irq_handler(dev_priv, pipe,
 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
@@ -1643,18 +1619,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 	}
 }
 
-static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
+				     enum pipe pipe)
 {
-	if (!drm_handle_vblank(dev, pipe))
-		return false;
+	bool ret;
 
-	return true;
+	ret = drm_handle_vblank(dev_priv->dev, pipe);
+	if (ret)
+		intel_finish_page_flip_mmio(dev_priv, pipe);
+
+	return ret;
 }
 
-static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
-					u32 pipe_stats[I915_MAX_PIPES])
+static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 
 	spin_lock(&dev_priv->irq_lock);
@@ -1710,31 +1689,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
 	spin_unlock(&dev_priv->irq_lock);
 }
 
-static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 					    u32 pipe_stats[I915_MAX_PIPES])
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
-		    intel_pipe_handle_vblank(dev, pipe))
-			intel_check_page_flip(dev, pipe);
+		    intel_pipe_handle_vblank(dev_priv, pipe))
+			intel_check_page_flip(dev_priv, pipe);
 
-		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
-			intel_prepare_page_flip(dev, pipe);
-			intel_finish_page_flip(dev, pipe);
-		}
+		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+			intel_finish_page_flip_cs(dev_priv, pipe);
 
 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-			i9xx_pipe_crc_irq_handler(dev, pipe);
+			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 	}
 
 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
-		gmbus_irq_handler(dev);
+		gmbus_irq_handler(dev_priv);
 }
 
 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
@@ -1747,12 +1723,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 	return hotplug_status;
 }
 
-static void i9xx_hpd_irq_handler(struct drm_device *dev,
+static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 				 u32 hotplug_status)
 {
 	u32 pin_mask = 0, long_mask = 0;
 
-	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+	    IS_CHERRYVIEW(dev_priv)) {
 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
 		if (hotplug_trigger) {
@@ -1760,11 +1737,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
 					   hotplug_trigger, hpd_status_g4x,
 					   i9xx_port_hotplug_long_detect);
 
-			intel_hpd_irq_handler(dev, pin_mask, long_mask);
+			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 		}
 
 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
-			dp_aux_irq_handler(dev);
+			dp_aux_irq_handler(dev_priv);
 	} else {
 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
@@ -1772,7 +1749,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 					   hotplug_trigger, hpd_status_i915,
 					   i9xx_port_hotplug_long_detect);
-			intel_hpd_irq_handler(dev, pin_mask, long_mask);
+			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 		}
 	}
 }
@@ -1831,7 +1808,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 
 		/* Call regardless, as some status bits might not be
 		 * signalled in iir */
-		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
 		/*
 		 * VLV_IIR is single buffered, and reflects the level
@@ -1850,9 +1827,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 			gen6_rps_irq_handler(dev_priv, pm_iir);
 
 		if (hotplug_status)
-			i9xx_hpd_irq_handler(dev, hotplug_status);
+			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 
-		valleyview_pipestat_irq_handler(dev, pipe_stats);
+		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
 	} while (0);
 
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -1911,7 +1888,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 
 		/* Call regardless, as some status bits might not be
 		 * signalled in iir */
-		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
 		/*
 		 * VLV_IIR is single buffered, and reflects the level
@@ -1927,9 +1904,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 		gen8_gt_irq_handler(dev_priv, gt_iir);
 
 		if (hotplug_status)
-			i9xx_hpd_irq_handler(dev, hotplug_status);
+			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 
-		valleyview_pipestat_irq_handler(dev, pipe_stats);
+		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
 	} while (0);
 
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -1937,10 +1914,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 	return ret;
 }
 
-static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
+				u32 hotplug_trigger,
 				const u32 hpd[HPD_NUM_PINS])
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
 	/*
@@ -1966,16 +1943,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
 			   dig_hotplug_reg, hpd,
 			   pch_port_hotplug_long_detect);
 
-	intel_hpd_irq_handler(dev, pin_mask, long_mask);
+	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 }
 
-static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
-	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1985,10 +1961,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 	}
 
 	if (pch_iir & SDE_AUX_MASK)
-		dp_aux_irq_handler(dev);
+		dp_aux_irq_handler(dev_priv);
 
 	if (pch_iir & SDE_GMBUS)
-		gmbus_irq_handler(dev);
+		gmbus_irq_handler(dev_priv);
 
 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -2018,9 +1994,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
 }
 
-static void ivb_err_int_handler(struct drm_device *dev)
+static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 err_int = I915_READ(GEN7_ERR_INT);
 	enum pipe pipe;
 
@@ -2032,19 +2007,18 @@ static void ivb_err_int_handler(struct drm_device *dev)
 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
-			if (IS_IVYBRIDGE(dev))
-				ivb_pipe_crc_irq_handler(dev, pipe);
+			if (IS_IVYBRIDGE(dev_priv))
+				ivb_pipe_crc_irq_handler(dev_priv, pipe);
 			else
-				hsw_pipe_crc_irq_handler(dev, pipe);
+				hsw_pipe_crc_irq_handler(dev_priv, pipe);
 		}
 	}
 
 	I915_WRITE(GEN7_ERR_INT, err_int);
 }
 
-static void cpt_serr_int_handler(struct drm_device *dev)
+static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 serr_int = I915_READ(SERR_INT);
 
 	if (serr_int & SERR_INT_POISON)
@@ -2062,13 +2036,12 @@ static void cpt_serr_int_handler(struct drm_device *dev)
 	I915_WRITE(SERR_INT, serr_int);
 }
 
-static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
-	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2078,10 +2051,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 	}
 
 	if (pch_iir & SDE_AUX_MASK_CPT)
-		dp_aux_irq_handler(dev);
+		dp_aux_irq_handler(dev_priv);
 
 	if (pch_iir & SDE_GMBUS_CPT)
-		gmbus_irq_handler(dev);
+		gmbus_irq_handler(dev_priv);
 
 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -2096,12 +2069,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 			   I915_READ(FDI_RX_IIR(pipe)));
 
 	if (pch_iir & SDE_ERROR_CPT)
-		cpt_serr_int_handler(dev);
+		cpt_serr_int_handler(dev_priv);
 }
 
-static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
 		~SDE_PORTE_HOTPLUG_SPT;
 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -2130,16 +2102,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
 	}
 
 	if (pin_mask)
-		intel_hpd_irq_handler(dev, pin_mask, long_mask);
+		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 
 	if (pch_iir & SDE_GMBUS_CPT)
-		gmbus_irq_handler(dev);
+		gmbus_irq_handler(dev_priv);
 }
 
-static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
+				u32 hotplug_trigger,
 				const u32 hpd[HPD_NUM_PINS])
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
@@ -2149,97 +2121,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
 			   dig_hotplug_reg, hpd,
 			   ilk_port_hotplug_long_detect);
 
-	intel_hpd_irq_handler(dev, pin_mask, long_mask);
+	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 }
 
-static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
+				    u32 de_iir)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
 
 	if (hotplug_trigger)
-		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
+		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
 
 	if (de_iir & DE_AUX_CHANNEL_A)
-		dp_aux_irq_handler(dev);
+		dp_aux_irq_handler(dev_priv);
 
 	if (de_iir & DE_GSE)
-		intel_opregion_asle_intr(dev);
+		intel_opregion_asle_intr(dev_priv);
 
 	if (de_iir & DE_POISON)
 		DRM_ERROR("Poison interrupt\n");
 
 	for_each_pipe(dev_priv, pipe) {
 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
-		    intel_pipe_handle_vblank(dev, pipe))
-			intel_check_page_flip(dev, pipe);
+		    intel_pipe_handle_vblank(dev_priv, pipe))
+			intel_check_page_flip(dev_priv, pipe);
 
 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
-			i9xx_pipe_crc_irq_handler(dev, pipe);
+			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
 		/* plane/pipes map 1:1 on ilk+ */
-		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
-			intel_prepare_page_flip(dev, pipe);
-			intel_finish_page_flip_plane(dev, pipe);
-		}
+		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+			intel_finish_page_flip_cs(dev_priv, pipe);
 	}
 
 	/* check event from PCH */
 	if (de_iir & DE_PCH_EVENT) {
 		u32 pch_iir = I915_READ(SDEIIR);
 
-		if (HAS_PCH_CPT(dev))
-			cpt_irq_handler(dev, pch_iir);
+		if (HAS_PCH_CPT(dev_priv))
+			cpt_irq_handler(dev_priv, pch_iir);
 		else
-			ibx_irq_handler(dev, pch_iir);
+			ibx_irq_handler(dev_priv, pch_iir);
 
 		/* should clear PCH hotplug event before clear CPU irq */
 		I915_WRITE(SDEIIR, pch_iir);
 	}
 
-	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
-		ironlake_rps_change_irq_handler(dev);
+	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+		ironlake_rps_change_irq_handler(dev_priv);
 }
 
-static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
+				    u32 de_iir)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
 
 	if (hotplug_trigger)
-		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
+		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
 
 	if (de_iir & DE_ERR_INT_IVB)
-		ivb_err_int_handler(dev);
+		ivb_err_int_handler(dev_priv);
 
 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
-		dp_aux_irq_handler(dev);
+		dp_aux_irq_handler(dev_priv);
 
 	if (de_iir & DE_GSE_IVB)
-		intel_opregion_asle_intr(dev);
+		intel_opregion_asle_intr(dev_priv);
 
 	for_each_pipe(dev_priv, pipe) {
 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
-		    intel_pipe_handle_vblank(dev, pipe))
-			intel_check_page_flip(dev, pipe);
+		    intel_pipe_handle_vblank(dev_priv, pipe))
+			intel_check_page_flip(dev_priv, pipe);
 
 		/* plane/pipes map 1:1 on ilk+ */
-		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
-			intel_prepare_page_flip(dev, pipe);
-			intel_finish_page_flip_plane(dev, pipe);
-		}
+		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+			intel_finish_page_flip_cs(dev_priv, pipe);
 	}
 
 	/* check event from PCH */
-	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
 		u32 pch_iir = I915_READ(SDEIIR);
 
-		cpt_irq_handler(dev, pch_iir);
+		cpt_irq_handler(dev_priv, pch_iir);
 
 		/* clear PCH hotplug event before clear CPU irq */
 		I915_WRITE(SDEIIR, pch_iir);
@@ -2277,7 +2245,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	 * able to process them after we restore SDEIER (as soon as we restore
 	 * it, we'll get an interrupt if SDEIIR still has something to process
 	 * due to its back queue). */
-	if (!HAS_PCH_NOP(dev)) {
+	if (!HAS_PCH_NOP(dev_priv)) {
 		sde_ier = I915_READ(SDEIER);
 		I915_WRITE(SDEIER, 0);
 		POSTING_READ(SDEIER);
@@ -2289,7 +2257,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	if (gt_iir) {
 		I915_WRITE(GTIIR, gt_iir);
 		ret = IRQ_HANDLED;
-		if (INTEL_INFO(dev)->gen >= 6)
+		if (INTEL_GEN(dev_priv) >= 6)
 			snb_gt_irq_handler(dev_priv, gt_iir);
 		else
 			ilk_gt_irq_handler(dev_priv, gt_iir);
@@ -2299,13 +2267,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	if (de_iir) {
 		I915_WRITE(DEIIR, de_iir);
 		ret = IRQ_HANDLED;
-		if (INTEL_INFO(dev)->gen >= 7)
-			ivb_display_irq_handler(dev, de_iir);
+		if (INTEL_GEN(dev_priv) >= 7)
+			ivb_display_irq_handler(dev_priv, de_iir);
 		else
-			ilk_display_irq_handler(dev, de_iir);
+			ilk_display_irq_handler(dev_priv, de_iir);
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
+	if (INTEL_GEN(dev_priv) >= 6) {
 		u32 pm_iir = I915_READ(GEN6_PMIIR);
 		if (pm_iir) {
 			I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -2316,7 +2284,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
-	if (!HAS_PCH_NOP(dev)) {
+	if (!HAS_PCH_NOP(dev_priv)) {
 		I915_WRITE(SDEIER, sde_ier);
 		POSTING_READ(SDEIER);
 	}
@@ -2327,10 +2295,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	return ret;
 }
 
-static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
+				u32 hotplug_trigger,
 				const u32 hpd[HPD_NUM_PINS])
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
@@ -2340,13 +2308,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
 			   dig_hotplug_reg, hpd,
 			   bxt_port_hotplug_long_detect);
 
-	intel_hpd_irq_handler(dev, pin_mask, long_mask);
+	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 }
 
 static irqreturn_t
 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 {
-	struct drm_device *dev = dev_priv->dev;
 	irqreturn_t ret = IRQ_NONE;
 	u32 iir;
 	enum pipe pipe;
@@ -2357,7 +2324,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 		I915_WRITE(GEN8_DE_MISC_IIR, iir);
 		ret = IRQ_HANDLED;
 		if (iir & GEN8_DE_MISC_GSE)
-			intel_opregion_asle_intr(dev);
+			intel_opregion_asle_intr(dev_priv);
 		else
 			DRM_ERROR("Unexpected DE Misc interrupt\n");
 	}
@@ -2381,26 +2348,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			      GEN9_AUX_CHANNEL_D;
 
 		if (iir & tmp_mask) {
-			dp_aux_irq_handler(dev);
+			dp_aux_irq_handler(dev_priv);
 			found = true;
 		}
 
 		if (IS_BROXTON(dev_priv)) {
 			tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
 			if (tmp_mask) {
-				bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+				bxt_hpd_irq_handler(dev_priv, tmp_mask,
+						    hpd_bxt);
 				found = true;
 			}
 		} else if (IS_BROADWELL(dev_priv)) {
 			tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
 			if (tmp_mask) {
-				ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+				ilk_hpd_irq_handler(dev_priv,
+						    tmp_mask, hpd_bdw);
 				found = true;
 			}
 		}
 
-		if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
-			gmbus_irq_handler(dev);
+		if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+			gmbus_irq_handler(dev_priv);
 			found = true;
 		}
 
@@ -2427,8 +2396,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
 
 		if (iir & GEN8_PIPE_VBLANK &&
-		    intel_pipe_handle_vblank(dev, pipe))
-			intel_check_page_flip(dev, pipe);
+		    intel_pipe_handle_vblank(dev_priv, pipe))
+			intel_check_page_flip(dev_priv, pipe);
 
 		flip_done = iir;
 		if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2436,13 +2405,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 		else
 			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
 
-		if (flip_done) {
-			intel_prepare_page_flip(dev, pipe);
-			intel_finish_page_flip_plane(dev, pipe);
-		}
+		if (flip_done)
+			intel_finish_page_flip_cs(dev_priv, pipe);
 
 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
-			hsw_pipe_crc_irq_handler(dev, pipe);
+			hsw_pipe_crc_irq_handler(dev_priv, pipe);
 
 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
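Across these hunks the old common page-flip completion splits into two
paths: MMIO-programmed flips now complete from the vblank interrupt, while
CS-programmed flips complete from the flip-done bits. A sketch of the
resulting dispatch (the wrapper is hypothetical; both callees are from the
patch):

	static void finish_flips(struct drm_i915_private *dev_priv,
				 enum pipe pipe, bool vblank, bool flip_done)
	{
		if (vblank)
			intel_finish_page_flip_mmio(dev_priv, pipe);	/* MMIO flips */
		if (flip_done)
			intel_finish_page_flip_cs(dev_priv, pipe);	/* CS flips */
	}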
@@ -2459,7 +2426,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 				  fault_errors);
 	}
 
-	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
+	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
 	    master_ctl & GEN8_DE_PCH_IRQ) {
 		/*
 		 * FIXME(BDW): Assume for now that the new interrupt handling
@@ -2472,9 +2439,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 		ret = IRQ_HANDLED;
 
 		if (HAS_PCH_SPT(dev_priv))
-			spt_irq_handler(dev, iir);
+			spt_irq_handler(dev_priv, iir);
 		else
-			cpt_irq_handler(dev, iir);
+			cpt_irq_handler(dev_priv, iir);
 	} else {
 		/*
 		 * Like on previous PCH there seems to be something
@@ -2555,15 +2522,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
  * Fire an error uevent so userspace can see that a hang or error
  * was detected.
  */
-static void i915_reset_and_wakeup(struct drm_device *dev)
+static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
 	int ret;
 
-	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
+	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
 	/*
 	 * Note that there's only one work item which does gpu resets, so we
@@ -2577,8 +2544,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 	 */
 	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
-				   reset_event);
+		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
 
 		/*
 		 * In most cases it's guaranteed that we get here with an RPM
@@ -2589,7 +2555,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 		 */
 		intel_runtime_pm_get(dev_priv);
 
-		intel_prepare_reset(dev);
+		intel_prepare_reset(dev_priv);
 
 		/*
 		 * All state reset _must_ be completed before we update the
@@ -2597,14 +2563,14 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 		 * pending state and not properly drop locks, resulting in
 		 * deadlocks with the reset work.
 		 */
-		ret = i915_reset(dev);
+		ret = i915_reset(dev_priv);
 
-		intel_finish_reset(dev);
+		intel_finish_reset(dev_priv);
 
 		intel_runtime_pm_put(dev_priv);
 
 		if (ret == 0)
-			kobject_uevent_env(&dev->primary->kdev->kobj,
+			kobject_uevent_env(kobj,
 					   KOBJ_CHANGE, reset_done_event);
 
 		/*
@@ -2615,9 +2581,8 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 	}
 }
 
-static void i915_report_and_clear_eir(struct drm_device *dev)
+static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t instdone[I915_NUM_INSTDONE_REG];
 	u32 eir = I915_READ(EIR);
 	int pipe, i;
@@ -2627,9 +2592,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 
 	pr_err("render error detected, EIR: 0x%08x\n", eir);
 
-	i915_get_extra_instdone(dev, instdone);
+	i915_get_extra_instdone(dev_priv, instdone);
 
-	if (IS_G4X(dev)) {
+	if (IS_G4X(dev_priv)) {
 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
 			u32 ipeir = I915_READ(IPEIR_I965);
 
@@ -2651,7 +2616,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 		}
 	}
 
-	if (!IS_GEN2(dev)) {
+	if (!IS_GEN2(dev_priv)) {
 		if (eir & I915_ERROR_PAGE_TABLE) {
 			u32 pgtbl_err = I915_READ(PGTBL_ER);
 			pr_err("page table error\n");
@@ -2673,7 +2638,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
-		if (INTEL_INFO(dev)->gen < 4) {
+		if (INTEL_GEN(dev_priv) < 4) {
 			u32 ipeir = I915_READ(IPEIR);
 
 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2717,10 +2682,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+		       u32 engine_mask,
 		       const char *fmt, ...)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	va_list args;
 	char error_msg[80];
 
@@ -2728,8 +2693,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
 	va_end(args);
 
-	i915_capture_error_state(dev, engine_mask, error_msg);
-	i915_report_and_clear_eir(dev);
+	i915_capture_error_state(dev_priv, engine_mask, error_msg);
+	i915_report_and_clear_eir(dev_priv);
 
 	if (engine_mask) {
 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2751,7 +2716,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 		i915_error_wake_up(dev_priv, false);
 	}
 
-	i915_reset_and_wakeup(dev);
+	i915_reset_and_wakeup(dev_priv);
 }
 
 /* Called from drm generic code, passed 'crtc' which
@@ -2869,9 +2834,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
 }
 
 static bool
-ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
 {
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		return (ipehr >> 23) == 0x1c;
 	} else {
 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2884,10 +2849,10 @@ static struct intel_engine_cs *
 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 				 u64 offset)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_engine_cs *signaller;
 
-	if (INTEL_INFO(dev_priv)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		for_each_engine(signaller, dev_priv) {
 			if (engine == signaller)
 				continue;
@@ -2916,7 +2881,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 static struct intel_engine_cs *
 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
@@ -2942,7 +2907,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;
 
 	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-	if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
+	if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
 		return NULL;
 
 	/*
@@ -2954,7 +2919,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 	 * ringbuffer itself.
 	 */
 	head = I915_READ_HEAD(engine) & HEAD_ADDR;
-	backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
 
 	for (i = backwards; i; --i) {
 		/*
@@ -2976,7 +2941,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;
 
 	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
-	if (INTEL_INFO(engine->dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		offset = ioread32(engine->buffer->virtual_start + head + 12);
 		offset <<= 32;
 		offset |= ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2951,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 
 static int semaphore_passed(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_engine_cs *signaller;
 	u32 seqno;
 
@@ -3028,7 +2993,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
 	if (engine->id != RCS)
 		return true;
 
-	i915_get_extra_instdone(engine->dev, instdone);
+	i915_get_extra_instdone(engine->i915, instdone);
 
 	/* There might be unstable subunit states even when
 	 * actual head is not moving. Filter out the unstable ones by
@@ -3069,8 +3034,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 static enum intel_ring_hangcheck_action
 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	enum intel_ring_hangcheck_action ha;
 	u32 tmp;
 
@@ -3078,7 +3042,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 	if (ha != HANGCHECK_HUNG)
 		return ha;
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return HANGCHECK_HUNG;
 
 	/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3052,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev, 0,
+		i915_handle_error(dev_priv, 0,
 				  "Kicking stuck wait on %s",
 				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
 		return HANGCHECK_KICK;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
 		switch (semaphore_passed(engine)) {
 		default:
 			return HANGCHECK_HUNG;
 		case 1:
-			i915_handle_error(dev, 0,
+			i915_handle_error(dev_priv, 0,
 					  "Kicking stuck semaphore on %s",
 					  engine->name);
 			I915_WRITE_CTL(engine, tmp);
@@ -3115,7 +3079,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 
 static unsigned kick_waiters(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *i915 = to_i915(engine->dev);
+	struct drm_i915_private *i915 = engine->i915;
 	unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
 
 	if (engine->hangcheck.user_interrupts == user_interrupts &&
@@ -3144,7 +3108,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv),
 			     gpu_error.hangcheck_work.work);
-	struct drm_device *dev = dev_priv->dev;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int busy_count = 0, rings_hung = 0;
@@ -3272,22 +3235,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	}
 
 	if (rings_hung) {
-		i915_handle_error(dev, rings_hung, "Engine(s) hung");
+		i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
 		goto out;
 	}
 
 	if (busy_count)
 		/* Reset timer case chip hangs without another request
 		 * being added */
-		i915_queue_hangcheck(dev);
+		i915_queue_hangcheck(dev_priv);
 
 out:
 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 }
 
-void i915_queue_hangcheck(struct drm_device *dev)
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
 {
-	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
+	struct i915_gpu_error *e = &dev_priv->gpu_error;
 
 	if (!i915.enable_hangcheck)
 		return;
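Note: the hangcheck hunks above all follow one conversion pattern: helpers take struct drm_i915_private * directly instead of re-deriving it from struct drm_device *, and generation tests use INTEL_GEN()/IS_GEN2() on dev_priv. A minimal stand-alone sketch of the pattern (simplified stand-in types, not the kernel's real definitions):

    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures (assumption). */
    struct intel_device_info { int gen; };
    struct drm_i915_private { struct intel_device_info info; };

    #define INTEL_INFO(p)   (&(p)->info)
    #define INTEL_GEN(p)    (INTEL_INFO(p)->gen)
    #define IS_GEN2(p)      (INTEL_GEN(p) == 2)

    /* Callers hand dev_priv straight through; no dev->dev_private hop. */
    static int semaphore_backwards(struct drm_i915_private *dev_priv)
    {
            return (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
    }

    int main(void)
    {
            struct drm_i915_private bdw = { .info = { .gen = 8 } };

            printf("backwards=%d gen2=%d\n",
                   semaphore_backwards(&bdw), IS_GEN2(&bdw));
            return 0;
    }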
@@ -3500,31 +3463,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
+static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
 				  const u32 hpd[HPD_NUM_PINS])
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
 	u32 enabled_irqs = 0;
 
-	for_each_intel_encoder(dev, encoder)
+	for_each_intel_encoder(dev_priv->dev, encoder)
 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
 			enabled_irqs |= hpd[encoder->hpd_pin];
 
 	return enabled_irqs;
 }
 
-static void ibx_hpd_irq_setup(struct drm_device *dev)
+static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_irqs, hotplug, enabled_irqs;
 
-	if (HAS_PCH_IBX(dev)) {
+	if (HAS_PCH_IBX(dev_priv)) {
 		hotplug_irqs = SDE_HOTPLUG_MASK;
-		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
+		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
 	} else {
 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
+		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
 	}
 
 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3543,18 +3504,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
 	 * When CPU and PCH are on the same package, port A
 	 * HPD must be enabled in both north and south.
 	 */
-	if (HAS_PCH_LPT_LP(dev))
+	if (HAS_PCH_LPT_LP(dev_priv))
 		hotplug |= PORTA_HOTPLUG_ENABLE;
 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 }
 
-static void spt_hpd_irq_setup(struct drm_device *dev)
+static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_irqs, hotplug, enabled_irqs;
 
 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
-	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
+	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
 
 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
@@ -3569,24 +3529,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev)
 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
 }
 
-static void ilk_hpd_irq_setup(struct drm_device *dev)
+static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_irqs, hotplug, enabled_irqs;
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
-		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
+		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
 
 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
-	} else if (INTEL_INFO(dev)->gen >= 7) {
+	} else if (INTEL_GEN(dev_priv) >= 7) {
 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
-		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
+		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
 
 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
 	} else {
 		hotplug_irqs = DE_DP_A_HOTPLUG;
-		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
+		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
 
 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
 	}
@@ -3601,15 +3560,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev)
 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
 
-	ibx_hpd_irq_setup(dev);
+	ibx_hpd_irq_setup(dev_priv);
 }
 
-static void bxt_hpd_irq_setup(struct drm_device *dev)
+static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_irqs, hotplug, enabled_irqs;
 
-	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
+	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
 
 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
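Note: every *_hpd_irq_setup() variant above now shares the same signature, matching the dev_priv->display.hpd_irq_setup hook they are installed into. A sketch of the hook wiring, with the struct reduced to the one member of interest (an illustration, not the full kernel type):

    struct drm_i915_private;

    /* The function pointer in dev_priv->display now takes dev_priv itself. */
    struct intel_display_hooks {
            void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
    };

    static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
    {
            /* program the SDE hotplug masks, as in the hunk above */
    }

    static void pick_hpd_hook(struct intel_display_hooks *display)
    {
            /* all platform variants are now interchangeable */
            display->hpd_irq_setup = ibx_hpd_irq_setup;
    }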
@@ -3827,6 +3785,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	uint32_t de_pipe_enables;
 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
 	u32 de_port_enables;
+	u32 de_misc_masked = GEN8_DE_MISC_GSE;
 	enum pipe pipe;
 
 	if (INTEL_INFO(dev_priv)->gen >= 9) {
@@ -3862,6 +3821,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 			  de_pipe_enables);
 
 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
 }
 
 static int gen8_irq_postinstall(struct drm_device *dev)
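Note: the two added lines unmask GSE (Graphics System Event, the opregion/ASLE interrupt) in the gen8 DE misc register pair. The GEN5_IRQ_INIT() idiom used here boils down to roughly the following (a sketch of the macro's effect; the real macro also asserts that IIR is already clear):

    /* ~de_misc_masked into IMR: everything stays masked except GSE. */
    I915_WRITE(GEN8_DE_MISC_IMR, ~de_misc_masked);
    /* de_misc_masked into IER: only GSE is enabled. */
    I915_WRITE(GEN8_DE_MISC_IER, de_misc_masked);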
@@ -4006,13 +3966,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 /*
  * Returns true when a page flip has completed.
  */
-static bool i8xx_handle_vblank(struct drm_device *dev,
+static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
 			       int plane, int pipe, u32 iir)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
-	if (!intel_pipe_handle_vblank(dev, pipe))
+	if (!intel_pipe_handle_vblank(dev_priv, pipe))
 		return false;
 
 	if ((iir & flip_pending) == 0)
@@ -4027,12 +3986,11 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
 	if (I915_READ16(ISR) & flip_pending)
 		goto check_page_flip;
 
-	intel_prepare_page_flip(dev, plane);
-	intel_finish_page_flip(dev, pipe);
+	intel_finish_page_flip_cs(dev_priv, pipe);
 	return true;
 
 check_page_flip:
-	intel_check_page_flip(dev, pipe);
+	intel_check_page_flip(dev_priv, pipe);
 	return false;
 }
 
@@ -4089,15 +4047,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 
 	for_each_pipe(dev_priv, pipe) {
 		int plane = pipe;
-		if (HAS_FBC(dev))
+		if (HAS_FBC(dev_priv))
 			plane = !plane;
 
 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
-		    i8xx_handle_vblank(dev, plane, pipe, iir))
+		    i8xx_handle_vblank(dev_priv, plane, pipe, iir))
 			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-			i9xx_pipe_crc_irq_handler(dev, pipe);
+			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 			intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4182,7 +4140,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(IER, enable_mask);
 	POSTING_READ(IER);
 
-	i915_enable_asle_pipestat(dev);
+	i915_enable_asle_pipestat(dev_priv);
 
 	/* Interrupt setup is already guaranteed to be single-threaded, this is
 	 * just to make the assert_spin_locked check happy. */
@@ -4197,13 +4155,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
 /*
  * Returns true when a page flip has completed.
  */
-static bool i915_handle_vblank(struct drm_device *dev,
+static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
 			       int plane, int pipe, u32 iir)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
-	if (!intel_pipe_handle_vblank(dev, pipe))
+	if (!intel_pipe_handle_vblank(dev_priv, pipe))
 		return false;
 
 	if ((iir & flip_pending) == 0)
@@ -4218,12 +4175,11 @@ static bool i915_handle_vblank(struct drm_device *dev,
 	if (I915_READ(ISR) & flip_pending)
 		goto check_page_flip;
 
-	intel_prepare_page_flip(dev, plane);
-	intel_finish_page_flip(dev, pipe);
+	intel_finish_page_flip_cs(dev_priv, pipe);
 	return true;
 
 check_page_flip:
-	intel_check_page_flip(dev, pipe);
+	intel_check_page_flip(dev_priv, pipe);
 	return false;
 }
 
@@ -4273,11 +4229,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 			break;
 
 		/* Consume port. Then clear IIR or we'll miss events */
-		if (I915_HAS_HOTPLUG(dev) &&
+		if (I915_HAS_HOTPLUG(dev_priv) &&
 		    iir & I915_DISPLAY_PORT_INTERRUPT) {
 			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
 			if (hotplug_status)
-				i9xx_hpd_irq_handler(dev, hotplug_status);
+				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 		}
 
 		I915_WRITE(IIR, iir & ~flip_mask);
@@ -4288,18 +4244,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
 		for_each_pipe(dev_priv, pipe) {
 			int plane = pipe;
-			if (HAS_FBC(dev))
+			if (HAS_FBC(dev_priv))
 				plane = !plane;
 
 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
-			    i915_handle_vblank(dev, plane, pipe, iir))
+			    i915_handle_vblank(dev_priv, plane, pipe, iir))
 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
 
 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-				i9xx_pipe_crc_irq_handler(dev, pipe);
+				i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4307,7 +4263,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		}
 
 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
-			intel_opregion_asle_intr(dev);
+			intel_opregion_asle_intr(dev_priv);
 
 		/* With MSI, interrupts are only generated when iir
 		 * transitions from zero to nonzero. If another bit got
@@ -4391,7 +4347,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
 	enable_mask |= I915_USER_INTERRUPT;
 
-	if (IS_G4X(dev))
+	if (IS_G4X(dev_priv))
 		enable_mask |= I915_BSD_USER_INTERRUPT;
 
 	/* Interrupt setup is already guaranteed to be single-threaded, this is
@@ -4406,7 +4362,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
 	 * Enable some error detection, note the instruction error mask
 	 * bit is reserved, so we leave it masked.
 	 */
-	if (IS_G4X(dev)) {
+	if (IS_G4X(dev_priv)) {
 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
 			       GM45_ERROR_MEM_PRIV |
 			       GM45_ERROR_CP_PRIV |
@@ -4424,26 +4380,25 @@ static int i965_irq_postinstall(struct drm_device *dev)
 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
 	POSTING_READ(PORT_HOTPLUG_EN);
 
-	i915_enable_asle_pipestat(dev);
+	i915_enable_asle_pipestat(dev_priv);
 
 	return 0;
 }
 
-static void i915_hpd_irq_setup(struct drm_device *dev)
+static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_en;
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
 	/* Note HDMI and DP share hotplug bits */
 	/* enable bits are the same for all generations */
-	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
+	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
 	/* Programming the CRT detection parameters tends
 	   to generate a spurious hotplug event about three
 	   seconds later. So just do it once.
 	*/
-	if (IS_G4X(dev))
+	if (IS_G4X(dev_priv))
 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 
@@ -4510,7 +4465,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
 			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
 			if (hotplug_status)
-				i9xx_hpd_irq_handler(dev, hotplug_status);
+				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 		}
 
 		I915_WRITE(IIR, iir & ~flip_mask);
@@ -4523,24 +4478,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 
 		for_each_pipe(dev_priv, pipe) {
 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
-			    i915_handle_vblank(dev, pipe, pipe, iir))
+			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
 
 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-				i9xx_pipe_crc_irq_handler(dev, pipe);
+				i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 		}
 
 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
-			intel_opregion_asle_intr(dev);
+			intel_opregion_asle_intr(dev_priv);
 
 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
-			gmbus_irq_handler(dev);
+			gmbus_irq_handler(dev_priv);
 
 		/* With MSI, interrupts are only generated when iir
 		 * transitions from zero to nonzero. If another bit got
@@ -4611,6 +4566,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	else
 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
+	dev_priv->rps.pm_intr_keep = 0;
+
+	/*
+	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
+	 * if GEN6_PM_UP_EI_EXPIRED is masked.
+	 *
+	 * TODO: verify if this can be reproduced on VLV,CHV.
+	 */
+	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+	if (INTEL_INFO(dev_priv)->gen >= 8)
+		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
 			  i915_hangcheck_elapsed);
 
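Note: rps.pm_intr_keep collects the PM interrupt bits that must never be masked off: GEN6_PM_RP_UP_EI_EXPIRED on gen6/7 minus Haswell (per the hang noted in the comment) and the gen8+ redirect bit. A stand-alone recomputation of the mask exactly as the hunk derives it (the bit positions below are placeholders; the real values live in i915_reg.h):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define GEN6_PM_RP_UP_EI_EXPIRED         (1u << 2)  /* placeholder bit */
    #define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1u << 31)

    static uint32_t compute_pm_intr_keep(int gen, bool is_haswell)
    {
            uint32_t keep = 0;

            /* Mirrors the logic added to intel_irq_init() above. */
            if (gen <= 7 && !is_haswell)
                    keep |= GEN6_PM_RP_UP_EI_EXPIRED;
            if (gen >= 8)
                    keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
            return keep;
    }

    int main(void)
    {
            printf("IVB keep=%#x BDW keep=%#x\n",
                   compute_pm_intr_keep(7, false),
                   compute_pm_intr_keep(8, false));
            return 0;
    }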
@@ -4674,12 +4643,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->disable_vblank = ironlake_disable_vblank;
 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
 	} else {
-		if (INTEL_INFO(dev_priv)->gen == 2) {
+		if (IS_GEN2(dev_priv)) {
 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
 			dev->driver->irq_handler = i8xx_irq_handler;
 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
-		} else if (INTEL_INFO(dev_priv)->gen == 3) {
+		} else if (IS_GEN3(dev_priv)) {
 			dev->driver->irq_preinstall = i915_irq_preinstall;
 			dev->driver->irq_postinstall = i915_irq_postinstall;
 			dev->driver->irq_uninstall = i915_irq_uninstall;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1779f02e6df8..5e18cf9f754d 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -54,10 +54,12 @@ struct i915_params i915 __read_mostly = {
 	.verbose_state_checks = 1,
 	.nuclear_pageflip = 0,
 	.edp_vswing = 0,
-	.enable_guc_submission = false,
+	.enable_guc_loading = 0,
+	.enable_guc_submission = 0,
 	.guc_log_level = -1,
 	.enable_dp_mst = true,
 	.inject_load_failure = 0,
+	.enable_dpcd_backlight = false,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
@@ -197,8 +199,15 @@ MODULE_PARM_DESC(edp_vswing,
197 "(0=use value from vbt [default], 1=low power swing(200mV)," 199 "(0=use value from vbt [default], 1=low power swing(200mV),"
198 "2=default swing(400mV))"); 200 "2=default swing(400mV))");
199 201
200module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400); 202module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
201MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)"); 203MODULE_PARM_DESC(enable_guc_loading,
204 "Enable GuC firmware loading "
205 "(-1=auto, 0=never [default], 1=if available, 2=required)");
206
207module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
208MODULE_PARM_DESC(enable_guc_submission,
209 "Enable GuC submission "
210 "(-1=auto, 0=never [default], 1=if available, 2=required)");
202 211
203module_param_named(guc_log_level, i915.guc_log_level, int, 0400); 212module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
204MODULE_PARM_DESC(guc_log_level, 213MODULE_PARM_DESC(guc_log_level,
@@ -210,3 +219,6 @@ MODULE_PARM_DESC(enable_dp_mst,
 module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
 MODULE_PARM_DESC(inject_load_failure,
 	"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
+module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
+MODULE_PARM_DESC(enable_dpcd_backlight,
+	"Enable support for DPCD backlight control (default:false)");
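Note: both GuC knobs change from booleans to integers so the documented four states fit: -1=auto, 0=never, 1=if available, 2=required. They are set on the kernel command line, e.g. i915.enable_guc_loading=1. One plausible way such a knob gets sanitized (illustrative only; the driver's actual sanitize logic lives with the GuC loader):

    /* Resolve "-1=auto, 0=never, 1=if available, 2=required". */
    static int sanitize_guc_option(int opt, bool has_guc, bool want_by_default)
    {
            if (opt < 0)                    /* auto: platform default */
                    opt = (has_guc && want_by_default) ? 1 : 0;
            if (opt == 1 && !has_guc)       /* "if available", but it isn't */
                    opt = 0;
            return opt;     /* 2 ("required") is left alone to fail loudly */
    }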
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 02bc27804291..1323261a0cdd 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -45,6 +45,8 @@ struct i915_params {
 	int enable_ips;
 	int invert_brightness;
 	int enable_cmd_parser;
+	int enable_guc_loading;
+	int enable_guc_submission;
 	int guc_log_level;
 	int use_mmio_flip;
 	int mmio_debug;
@@ -57,10 +59,10 @@ struct i915_params {
 	bool load_detect_test;
 	bool reset;
 	bool disable_display;
-	bool enable_guc_submission;
 	bool verbose_state_checks;
 	bool nuclear_pageflip;
 	bool enable_dp_mst;
+	bool enable_dpcd_backlight;
 };
 
 extern struct i915_params i915 __read_mostly;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b407411e31ba..dfb4c7a88de3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -886,7 +886,7 @@ enum skl_disp_power_wells {
  * PLLs can be routed to any transcoder A/B/C.
  *
  * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT).
+ * digital port D (CHV) or port A (BXT). ::
  *
  *
  *     Dual channel PHY (VLV/CHV/BXT)
@@ -2449,6 +2449,8 @@ enum skl_disp_power_wells {
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
 
+#define RAWCLK_FREQ_VLV		_MMIO(VLV_DISPLAY_BASE + 0x6024)
+
 #define _FPA0	0x6040
 #define _FPA1	0x6044
 #define _FPB0	0x6048
@@ -6031,6 +6033,7 @@ enum skl_disp_power_wells {
 #define CHICKEN_PAR1_1		_MMIO(0x42080)
 #define  DPA_MASK_VBLANK_SRD	(1 << 15)
 #define  FORCE_ARB_IDLE_PLANES	(1 << 14)
+#define  SKL_EDP_PSR_FIX_RDWRAP	(1 << 3)
 
 #define _CHICKEN_PIPESL_1_A	0x420b0
 #define _CHICKEN_PIPESL_1_B	0x420b4
@@ -6089,7 +6092,14 @@ enum skl_disp_power_wells {
 #define  VLV_B0_WA_L3SQCREG1_VALUE		0x00D30000
 
 #define GEN8_L3SQCREG1				_MMIO(0xB100)
-#define  BDW_WA_L3SQCREG1_DEFAULT		0x784000
+/*
+ * Note that on CHV the following has an off-by-one error wrt. to BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define  L3_GENERAL_PRIO_CREDITS(x)		(((x) >> 1) << 19)
+#define  L3_HIGH_PRIO_CREDITS(x)		(((x) >> 1) << 14)
 
 #define GEN7_L3CNTLREG1				_MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL		0x3C47FF8C
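Note: the new credit macros encode the general/high-priority L3SQC credit counts into their register fields; the hardware stores half the count, hence the >> 1. A quick stand-alone check that the encoding reproduces the hard-coded Broadwell value the hunk deletes:

    #include <stdio.h>

    #define L3_GENERAL_PRIO_CREDITS(x)  (((x) >> 1) << 19)
    #define L3_HIGH_PRIO_CREDITS(x)     (((x) >> 1) << 14)

    int main(void)
    {
            /* 30 general + 2 high-priority credits == the old
             * BDW_WA_L3SQCREG1_DEFAULT value (0x784000). */
            unsigned int val = L3_GENERAL_PRIO_CREDITS(30) |
                               L3_HIGH_PRIO_CREDITS(2);

            printf("GEN8_L3SQCREG1 = %#x\n", val);
            return 0;
    }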
@@ -7021,7 +7031,7 @@ enum skl_disp_power_wells {
 #define VLV_RCEDATA				_MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD			_MMIO(0xA0C0)
 #define GEN6_PMINTRMSK				_MMIO(0xA168)
-#define GEN8_PMINTR_REDIRECT_TO_NON_DISP	(1<<31)
+#define   GEN8_PMINTR_REDIRECT_TO_NON_DISP	(1<<31)
 #define VLV_PWRDWNUPCTL				_MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS		_MMIO(0xA0C4)
 #define GEN9_RENDER_PG_IDLE_HYSTERESIS		_MMIO(0xA0C8)
@@ -7557,14 +7567,15 @@ enum skl_disp_power_wells {
 #define  CDCLK_FREQ_540			(1<<26)
 #define  CDCLK_FREQ_337_308		(2<<26)
 #define  CDCLK_FREQ_675_617		(3<<26)
-#define  CDCLK_FREQ_DECIMAL_MASK	(0x7ff)
-
 #define  BXT_CDCLK_CD2X_DIV_SEL_MASK	(3<<22)
 #define  BXT_CDCLK_CD2X_DIV_SEL_1	(0<<22)
 #define  BXT_CDCLK_CD2X_DIV_SEL_1_5	(1<<22)
 #define  BXT_CDCLK_CD2X_DIV_SEL_2	(2<<22)
 #define  BXT_CDCLK_CD2X_DIV_SEL_4	(3<<22)
+#define  BXT_CDCLK_CD2X_PIPE(pipe)	((pipe)<<20)
+#define  BXT_CDCLK_CD2X_PIPE_NONE	BXT_CDCLK_CD2X_PIPE(3)
 #define  BXT_CDCLK_SSA_PRECHARGE_ENABLE	(1<<16)
+#define  CDCLK_FREQ_DECIMAL_MASK	(0x7ff)
 
 /* LCPLL_CTL */
 #define LCPLL1_CTL		_MMIO(0x46010)
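Note: BXT_CDCLK_CD2X_PIPE() selects which pipe the CD2X divider update is synchronized to, and BXT_CDCLK_CD2X_PIPE_NONE (field value 3) opts out of the synchronization. A sketch of composing a CDCLK_CTL value with the new fields (illustrative; the real Broxton sequence also negotiates the frequency with the PCU first, and the threshold below is an assumption):

    u32 val = divider;                      /* a BXT_CDCLK_CD2X_DIV_SEL_* value */

    val |= BXT_CDCLK_CD2X_PIPE_NONE;        /* no pipe synchronization */
    if (cdclk >= 500000)
            val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
    I915_WRITE(CDCLK_CTL, val);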
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2d576b7ff299..02507bfc8def 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev,
 	u64 units = 128ULL, div = 100000ULL;
 	u32 ret;
 
-	if (!intel_enable_rc6(dev))
+	if (!intel_enable_rc6())
 		return 0;
 
 	intel_runtime_pm_get(dev_priv);
@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
 static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_to_drm_minor(kdev);
-	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
 }
 
 static ssize_t
@@ -204,7 +203,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	u32 *temp = NULL; /* Just here to make handling failures easy */
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
@@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	/* We still need *_set_rps to process the new max_delay and
 	 * update the interrupt limits and PMINTRMSK even though
 	 * frequency request may be unchanged. */
-	intel_set_rps(dev, val);
+	intel_set_rps(dev_priv, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	/* We still need *_set_rps to process the new min_delay and
 	 * update the interrupt limits and PMINTRMSK even though
 	 * frequency request may be unchanged. */
-	intel_set_rps(dev, val);
+	intel_set_rps(dev_priv, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dc0def210097..6768db032f84 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 	),
 
 	TP_fast_assign(
-			   __entry->dev = from->dev->primary->index;
+			   __entry->dev = from->i915->dev->primary->index;
 			   __entry->sync_from = from->id;
 			   __entry->sync_to = to_req->engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 	),
 
 	TP_fast_assign(
-			   struct intel_engine_cs *engine =
-						i915_gem_request_get_engine(req);
-			   __entry->dev = engine->dev->primary->index;
-			   __entry->ring = engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   __entry->flags = flags;
-			   i915_trace_irq_get(engine, req);
+			   i915_trace_irq_get(req->engine, req);
 			   ),
 
 	TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
 	),
 
 	TP_fast_assign(
-			   __entry->dev = req->engine->dev->primary->index;
+			   __entry->dev = req->i915->dev->primary->index;
 			   __entry->ring = req->engine->id;
 			   __entry->invalidate = invalidate;
 			   __entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 	),
 
 	TP_fast_assign(
-			   struct intel_engine_cs *engine =
-						i915_gem_request_get_engine(req);
-			   __entry->dev = engine->dev->primary->index;
-			   __entry->ring = engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   ),
 
 	TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
 	),
 
 	TP_fast_assign(
-			   __entry->dev = engine->dev->primary->index;
+			   __entry->dev = engine->i915->dev->primary->index;
 			   __entry->ring = engine->id;
 			   __entry->seqno = engine->get_seqno(engine);
 			   ),
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	 * less desirable.
 	 */
 	TP_fast_assign(
-			   struct intel_engine_cs *engine =
-						i915_gem_request_get_engine(req);
-			   __entry->dev = engine->dev->primary->index;
-			   __entry->ring = engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   __entry->blocking =
-				     mutex_is_locked(&engine->dev->struct_mutex);
+				     mutex_is_locked(&req->i915->dev->struct_mutex);
 			   ),
 
 	TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -740,12 +734,12 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
  * the context.
  */
 DECLARE_EVENT_CLASS(i915_context,
-	TP_PROTO(struct intel_context *ctx),
+	TP_PROTO(struct i915_gem_context *ctx),
 	TP_ARGS(ctx),
 
 	TP_STRUCT__entry(
 			__field(u32, dev)
-			__field(struct intel_context *, ctx)
+			__field(struct i915_gem_context *, ctx)
 			__field(struct i915_address_space *, vm)
 	),
 
@@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context,
 )
 
 DEFINE_EVENT(i915_context, i915_context_create,
-	TP_PROTO(struct intel_context *ctx),
+	TP_PROTO(struct i915_gem_context *ctx),
 	TP_ARGS(ctx)
 );
 
 DEFINE_EVENT(i915_context, i915_context_free,
-	TP_PROTO(struct intel_context *ctx),
+	TP_PROTO(struct i915_gem_context *ctx),
 	TP_ARGS(ctx)
 );
 
@@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free,
  * called only if full ppgtt is enabled.
  */
 TRACE_EVENT(switch_mm,
-	TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
+	TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
 
 	TP_ARGS(engine, to),
 
 	TP_STRUCT__entry(
 			__field(u32, ring)
-			__field(struct intel_context *, to)
+			__field(struct i915_gem_context *, to)
 			__field(struct i915_address_space *, vm)
 			__field(u32, dev)
 	),
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
 			__entry->ring = engine->id;
 			__entry->to = to;
 			__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-			__entry->dev = engine->dev->primary->index;
+			__entry->dev = engine->i915->dev->primary->index;
 	),
 
 	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index d02efb8cad4d..004326291854 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -58,15 +58,14 @@
  * This function is called at the initialization stage, to detect whether
  * running on a vGPU.
  */
-void i915_check_vgpu(struct drm_device *dev)
+void i915_check_vgpu(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint64_t magic;
 	uint32_t version;
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-	if (!IS_HASWELL(dev))
+	if (!IS_HASWELL(dev_priv))
 		return;
 
 	magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
@@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
 
 /**
  * intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev: drm device
+ * @dev_priv: i915 device
  *
  * This function is called at the initialization stage, to balloon out the
  * graphic address space allocated to other vGPUs, by marking these spaces as
@@ -151,28 +150,28 @@ static int vgt_balloon_space(struct drm_mm *mm,
  * of its graphic space being zero. Yet there are some portions ballooned out(
  * the shadow part, which are marked as reserved by drm allocator). From the
  * host point of view, the graphic address space is partitioned by multiple
- * vGPUs in different VMs.
+ * vGPUs in different VMs. ::
  *
  *                        vGPU1 view         Host view
  *             0 ------> +-----------+     +-----------+
- *               ^       |///////////|     |   vGPU3   |
- *               |       |///////////|     +-----------+
- *               |       |///////////|     |   vGPU2   |
+ *               ^       |###########|     |   vGPU3   |
+ *               |       |###########|     +-----------+
+ *               |       |###########|     |   vGPU2   |
  *               |       +-----------+     +-----------+
  *        mappable GM    | available | ==> |   vGPU1   |
  *               |       +-----------+     +-----------+
- *               |       |///////////|     |           |
- *               v       |///////////|     |   Host    |
+ *               |       |###########|     |           |
+ *               v       |###########|     |   Host    |
  *               +=======+===========+     +===========+
- *               ^       |///////////|     |   vGPU3   |
- *               |       |///////////|     +-----------+
- *               |       |///////////|     |   vGPU2   |
+ *               ^       |###########|     |   vGPU3   |
+ *               |       |###########|     +-----------+
+ *               |       |###########|     |   vGPU2   |
  *               |       +-----------+     +-----------+
  *      unmappable GM    | available | ==> |   vGPU1   |
  *               |       +-----------+     +-----------+
- *               |       |///////////|     |           |
- *               |       |///////////|     |   Host    |
- *               v       |///////////|     |           |
+ *               |       |###########|     |           |
+ *               |       |###########|     |   Host    |
+ *               v       |###########|     |           |
  *  total GM size ------> +-----------+     +-----------+
  *
  * Returns:
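Note: ballooning works by pinning the ranges owned by the host and by other vGPUs as reserved nodes in the drm allocator, so guest allocations only ever land in the "available" stripes of the diagram. The reservation step looks roughly like this (a sketch over the drm_mm API; the real helper also validates and logs the range):

    static int vgt_balloon_space(struct drm_mm *mm,
                                 struct drm_mm_node *node,
                                 unsigned long start, unsigned long end)
    {
            if (start >= end)
                    return -EINVAL;

            /* Pin [start, end) so ordinary allocations skip over it. */
            node->start = start;
            node->size = end - start;
            return drm_mm_reserve_node(mm, node);
    }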
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 3c83b47b5f69..21ffcfea5f5d 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -110,7 +110,7 @@ struct vgt_if {
 #define VGT_DRV_DISPLAY_NOT_READY 0
 #define VGT_DRV_DISPLAY_READY     1 /* ready for display switch */
 
-extern void i915_check_vgpu(struct drm_device *dev);
+extern void i915_check_vgpu(struct drm_i915_private *dev_priv);
 extern int intel_vgt_balloon(struct drm_device *dev);
 extern void intel_vgt_deballoon(void);
 
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 50ff90aea721..c5a166752eda 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 
 		/* plane scaler case: assign as a plane scaler */
 		/* find the plane that set the bit as scaler_user */
-		plane = drm_state->planes[i];
+		plane = drm_state->planes[i].ptr;
 
 		/*
 		 * to enable/disable hq mode, add planes that are using scaler
@@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 				continue;
 			}
 
-			plane_state = to_intel_plane_state(drm_state->plane_states[i]);
+			plane_state = intel_atomic_get_existing_plane_state(drm_state,
+									    intel_plane);
 			scaler_id = &plane_state->scaler_id;
 		}
 
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 02a7527ce7bb..b9329c2a670a 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 static int i915_audio_component_get_cdclk_freq(struct device *dev)
 {
 	struct drm_i915_private *dev_priv = dev_to_i915(dev);
-	int ret;
 
 	if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
 		return -ENODEV;
 
-	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-	ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
-	return ret;
+	return dev_priv->cdclk_freq;
 }
 
 static int i915_audio_component_sync_audio_rate(struct device *dev,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b235b6e88ead..713a02db378a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
 	else
 		panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
 
+	panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+		dvo_timing->himage_lo;
+	panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+		dvo_timing->vimage_lo;
+
 	/* Some VBTs have bogus h/vtotal values */
 	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
 		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -213,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 
 	dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
 
-	ret = intel_opregion_get_panel_type(dev_priv->dev);
+	ret = intel_opregion_get_panel_type(dev_priv);
 	if (ret >= 0) {
 		WARN_ON(ret > 0xf);
 		panel_type = ret;
@@ -318,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
 		return;
 	}
 
+	dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+	if (bdb->version >= 191 &&
+	    get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
+		const struct bdb_lfp_backlight_control_method *method;
+
+		method = &backlight_data->backlight_control[panel_type];
+		dev_priv->vbt.backlight.type = method->type;
+	}
+
 	dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
 	dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
 	dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@@ -763,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
 		return;
 	}
 
+	/*
+	 * These fields are introduced from the VBT version 197 onwards,
+	 * so making sure that these bits are set zero in the previous
+	 * versions.
+	 */
+	if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
+		dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
+		dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
+	}
+
 	/* We have mandatory mipi config blocks. Initialize as generic panel */
 	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
 }
@@ -1187,7 +1211,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 	}
 	if (bdb->version < 106) {
 		expected_size = 22;
-	} else if (bdb->version < 109) {
+	} else if (bdb->version < 111) {
 		expected_size = 27;
 	} else if (bdb->version < 195) {
 		BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index ab0ea315eddb..8405b5a367d7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -30,6 +30,14 @@
 #ifndef _INTEL_BIOS_H_
 #define _INTEL_BIOS_H_
 
+enum intel_backlight_type {
+	INTEL_BACKLIGHT_PMIC,
+	INTEL_BACKLIGHT_LPSS,
+	INTEL_BACKLIGHT_DISPLAY_DDI,
+	INTEL_BACKLIGHT_DSI_DCS,
+	INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+};
+
 struct edp_power_seq {
 	u16 t1_t3;
 	u16 t8;
@@ -113,7 +121,13 @@ struct mipi_config {
 	u16 dual_link:2;
 	u16 lane_cnt:2;
 	u16 pixel_overlap:3;
-	u16 rsvd3:9;
+	u16 rgb_flip:1;
+#define DL_DCS_PORT_A			0x00
+#define DL_DCS_PORT_C			0x01
+#define DL_DCS_PORT_A_AND_C		0x02
+	u16 dl_dcs_cabc_ports:2;
+	u16 dl_dcs_backlight_ports:2;
+	u16 rsvd3:4;
 
 	u16 rsvd4;
 
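Note: the nine reserved bits become an rgb_flip flag plus two 2-bit selectors naming which DSI port(s) receive DCS commands for CABC and backlight control on dual-link panels. A small decoder for the new fields (the DL_DCS_* names are from the hunk; the helper itself is hypothetical):

    static const char *dl_dcs_port_name(unsigned int sel)
    {
            switch (sel) {
            case DL_DCS_PORT_A:             return "port A";
            case DL_DCS_PORT_C:             return "port C";
            case DL_DCS_PORT_A_AND_C:       return "ports A and C";
            default:                        return "reserved";
            }
    }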
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 1b3f97449395..522f5a2de015 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -547,7 +547,8 @@ void intel_color_init(struct drm_crtc *crtc)
 	/* Enable color management support when we have degamma & gamma LUTs. */
 	if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
 	    INTEL_INFO(dev)->color.gamma_lut_size != 0)
-		drm_helper_crtc_enable_color_mgmt(crtc,
+		drm_crtc_enable_color_mgmt(crtc,
 					INTEL_INFO(dev)->color.degamma_lut_size,
+					true,
 					INTEL_INFO(dev)->color.gamma_lut_size);
 }
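Note: the old helper is replaced by the core drm_crtc_enable_color_mgmt(), which inserts an explicit has_ctm flag between the two LUT sizes; the new true argument advertises CTM (color transform matrix) support. The core prototype, as declared in drm_crtc.h at this point:

    void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
                                    uint degamma_lut_size,
                                    bool has_ctm,
                                    uint gamma_lut_size);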
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3fbb6fc66451..9465de4135aa 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -753,7 +753,6 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
 	.mode_valid = intel_crt_mode_valid,
 	.get_modes = intel_crt_get_modes,
-	.best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -839,7 +838,7 @@ void intel_crt_init(struct drm_device *dev)
 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
 	drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
-			 DRM_MODE_ENCODER_DAC, NULL);
+			 DRM_MODE_ENCODER_DAC, "CRT");
 
 	intel_connector_attach_encoder(intel_connector, &crt->base);
 
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index a34c23eceba0..2b3b428d9cd2 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,16 +41,22 @@
 	 * be moved to FW_FAILED.
 	 */
 
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 1)
+
 #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+
 #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
 #define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
 
-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
 
-#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
+
 
 #define CSR_MAX_FW_SIZE			0x2FFF
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
 	char substepping;
 };
 
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
 static const struct stepping_info kbl_stepping_info[] = {
-	{'H', '0'}, {'I', '0'}
+	{'A', '0'}, {'B', '0'}, {'C', '0'},
+	{'D', '0'}, {'E', '0'}, {'F', '0'},
+	{'G', '0'}, {'H', '0'}, {'I', '0'},
 };
 
 static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
298 302
299 csr->version = css_header->version; 303 csr->version = css_header->version;
300 304
301 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 305 if (IS_KABYLAKE(dev_priv)) {
306 required_min_version = KBL_CSR_VERSION_REQUIRED;
307 } else if (IS_SKYLAKE(dev_priv)) {
302 required_min_version = SKL_CSR_VERSION_REQUIRED; 308 required_min_version = SKL_CSR_VERSION_REQUIRED;
303 } else if (IS_BROXTON(dev_priv)) { 309 } else if (IS_BROXTON(dev_priv)) {
304 required_min_version = BXT_CSR_VERSION_REQUIRED; 310 required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
446 if (!HAS_CSR(dev_priv)) 452 if (!HAS_CSR(dev_priv))
447 return; 453 return;
448 454
449 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 455 if (IS_KABYLAKE(dev_priv))
456 csr->fw_path = I915_CSR_KBL;
457 else if (IS_SKYLAKE(dev_priv))
450 csr->fw_path = I915_CSR_SKL; 458 csr->fw_path = I915_CSR_SKL;
451 else if (IS_BROXTON(dev_priv)) 459 else if (IS_BROXTON(dev_priv))
452 csr->fw_path = I915_CSR_BXT; 460 csr->fw_path = I915_CSR_BXT;
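For reference on the CSR_VERSION(major, minor) requirements above: the packing macro itself is not shown in this diff, so the definition below is an assumption modelled on the i915 header, included only to make the comparison concrete.

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

/* e.g. a DMC blob reporting 1.23 yields 0x10017, which satisfies
 * SKL_CSR_VERSION_REQUIRED = CSR_VERSION(1, 23) exactly. */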
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 01e523df363b..022b41d422dc 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
 {
 	struct intel_shared_dpll *pll;
 	struct intel_dpll_hw_state *state;
-	intel_clock_t clock;
+	struct dpll clock;
 
 	/* For DDI ports we always use a shared PLL. */
 	if (WARN_ON(dpll == DPLL_ID_PRIVATE))
@@ -2347,7 +2347,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	encoder = &intel_encoder->base;
 
 	drm_encoder_init(dev, encoder, &intel_ddi_funcs,
-			 DRM_MODE_ENCODER_TMDS, NULL);
+			 DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
 
 	intel_encoder->compute_config = intel_ddi_compute_config;
 	intel_encoder->enable = intel_enable_ddi;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2113f401f0ba..49322f6cfa2b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -48,6 +48,11 @@
 #include <linux/reservation.h>
 #include <linux/dma-buf.h>
 
+static bool is_mmio_work(struct intel_flip_work *work)
+{
+	return work->mmio_work.func;
+}
+
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {
 	DRM_FORMAT_C8,
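is_mmio_work() leans on a side effect of INIT_WORK(): the handler ends up in work_struct.func, so a zero-initialized flip_work reports false until the MMIO flip path arms mmio_work. A toy illustration under that assumption (struct trimmed to the two fields that matter):

struct toy_flip_work {
	struct work_struct unpin_work;	/* armed for every flip */
	struct work_struct mmio_work;	/* armed only for MMIO flips */
};

static bool toy_is_mmio(const struct toy_flip_work *work)
{
	/* NULL until INIT_WORK(&work->mmio_work, fn) has run */
	return work->mmio_work.func != NULL;
}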
@@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
 static void intel_modeset_setup_hw_state(struct drm_device *dev);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static int broxton_calc_cdclk(int max_pixclk);
 
-typedef struct {
-	int min, max;
-} intel_range_t;
-
-typedef struct {
-	int dot_limit;
-	int p2_slow, p2_fast;
-} intel_p2_t;
-
-typedef struct intel_limit intel_limit_t;
 struct intel_limit {
-	intel_range_t dot, vco, n, m, m1, m2, p, p1;
-	intel_p2_t p2;
+	struct {
+		int min, max;
+	} dot, vco, n, m, m1, m2, p, p1;
+
+	struct {
+		int dot_limit;
+		int p2_slow, p2_fast;
+	} p2;
 };
 
 /* returns HPLL frequency in kHz */
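The hunk above folds two single-use typedefs into anonymous members, matching the kernel's preference for plain struct tags over typedefs. Reduced to a toy with illustrative field names:

struct toy_limit {
	struct {
		int min, max;
	} dot, vco;			/* was: intel_range_t dot, vco; */

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;				/* was: intel_p2_t p2; */
};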
@@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv)
 static int
 intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
 {
+	/* RAWCLK_FREQ_VLV register updated from power well code */
 	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
 				      CCK_DISPLAY_REF_CLOCK_CONTROL);
 }
@@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+void intel_update_rawclk(struct drm_i915_private *dev_priv)
 {
 	if (HAS_PCH_SPLIT(dev_priv))
 		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
 		return 270000;
 }
 
-static const intel_limit_t intel_limits_i8xx_dac = {
+static const struct intel_limit intel_limits_i8xx_dac = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 908000, .max = 1512000 },
 	.n = { .min = 2, .max = 16 },
@@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = {
 		  .p2_slow = 4, .p2_fast = 2 },
 };
 
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const struct intel_limit intel_limits_i8xx_dvo = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 908000, .max = 1512000 },
 	.n = { .min = 2, .max = 16 },
@@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
 		  .p2_slow = 4, .p2_fast = 4 },
 };
 
-static const intel_limit_t intel_limits_i8xx_lvds = {
+static const struct intel_limit intel_limits_i8xx_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 908000, .max = 1512000 },
 	.n = { .min = 2, .max = 16 },
@@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
 		  .p2_slow = 14, .p2_fast = 7 },
 };
 
-static const intel_limit_t intel_limits_i9xx_sdvo = {
+static const struct intel_limit intel_limits_i9xx_sdvo = {
 	.dot = { .min = 20000, .max = 400000 },
 	.vco = { .min = 1400000, .max = 2800000 },
 	.n = { .min = 1, .max = 6 },
@@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
 		  .p2_slow = 10, .p2_fast = 5 },
 };
 
-static const intel_limit_t intel_limits_i9xx_lvds = {
+static const struct intel_limit intel_limits_i9xx_lvds = {
 	.dot = { .min = 20000, .max = 400000 },
 	.vco = { .min = 1400000, .max = 2800000 },
 	.n = { .min = 1, .max = 6 },
@@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
 };
 
 
-static const intel_limit_t intel_limits_g4x_sdvo = {
+static const struct intel_limit intel_limits_g4x_sdvo = {
 	.dot = { .min = 25000, .max = 270000 },
 	.vco = { .min = 1750000, .max = 3500000},
 	.n = { .min = 1, .max = 4 },
@@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
 	},
 };
 
-static const intel_limit_t intel_limits_g4x_hdmi = {
+static const struct intel_limit intel_limits_g4x_hdmi = {
 	.dot = { .min = 22000, .max = 400000 },
 	.vco = { .min = 1750000, .max = 3500000},
 	.n = { .min = 1, .max = 4 },
@@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
 		  .p2_slow = 10, .p2_fast = 5 },
 };
 
-static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
 	.dot = { .min = 20000, .max = 115000 },
 	.vco = { .min = 1750000, .max = 3500000 },
 	.n = { .min = 1, .max = 3 },
@@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
 	},
 };
 
-static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
 	.dot = { .min = 80000, .max = 224000 },
 	.vco = { .min = 1750000, .max = 3500000 },
 	.n = { .min = 1, .max = 3 },
@@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
 	},
 };
 
-static const intel_limit_t intel_limits_pineview_sdvo = {
+static const struct intel_limit intel_limits_pineview_sdvo = {
 	.dot = { .min = 20000, .max = 400000},
 	.vco = { .min = 1700000, .max = 3500000 },
 	/* Pineview's Ncounter is a ring counter */
@@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
 		  .p2_slow = 10, .p2_fast = 5 },
 };
 
-static const intel_limit_t intel_limits_pineview_lvds = {
+static const struct intel_limit intel_limits_pineview_lvds = {
 	.dot = { .min = 20000, .max = 400000 },
 	.vco = { .min = 1700000, .max = 3500000 },
 	.n = { .min = 3, .max = 6 },
@@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = {
  * We calculate clock using (register_value + 2) for N/M1/M2, so here
  * the range value for them is (actual_value - 2).
  */
-static const intel_limit_t intel_limits_ironlake_dac = {
+static const struct intel_limit intel_limits_ironlake_dac = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 5 },
@@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = {
 		  .p2_slow = 10, .p2_fast = 5 },
 };
 
-static const intel_limit_t intel_limits_ironlake_single_lvds = {
+static const struct intel_limit intel_limits_ironlake_single_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
@@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = {
 		  .p2_slow = 14, .p2_fast = 14 },
 };
 
-static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
@@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = {
 };
 
 /* LVDS 100mhz refclk limits. */
-static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 2 },
@@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
 		  .p2_slow = 14, .p2_fast = 14 },
 };
 
-static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
 	.dot = { .min = 25000, .max = 350000 },
 	.vco = { .min = 1760000, .max = 3510000 },
 	.n = { .min = 1, .max = 3 },
@@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
 		  .p2_slow = 7, .p2_fast = 7 },
 };
 
-static const intel_limit_t intel_limits_vlv = {
+static const struct intel_limit intel_limits_vlv = {
 	/*
 	 * These are the data rate limits (measured in fast clocks)
 	 * since those are the strictest limits we have. The fast
@@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = {
 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
 };
 
-static const intel_limit_t intel_limits_chv = {
+static const struct intel_limit intel_limits_chv = {
 	/*
 	 * These are the data rate limits (measured in fast clocks)
 	 * since those are the strictest limits we have. The fast
@@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = {
 	.p2 = { .p2_slow = 1, .p2_fast = 14 },
 };
 
-static const intel_limit_t intel_limits_bxt = {
+static const struct intel_limit intel_limits_bxt = {
 	/* FIXME: find real dot limits */
 	.dot = { .min = 0, .max = INT_MAX },
 	.vco = { .min = 4800000, .max = 6700000 },
@@ -581,7 +585,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
  * divided-down version of it.
  */
 /* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
@@ -598,7 +602,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 }
 
-static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
 {
 	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
@@ -610,7 +614,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
 	return clock->dot;
 }
 
-static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
@@ -622,7 +626,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
 	return clock->dot / 5;
 }
 
-int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
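The renamed calc helpers above share one dividend/divider scheme, but only their first lines appear in these hunks, so the closing relations below are stated as assumptions from the i9xx case. A worked evaluation:

/*
 * Assumed i9xx relations (bodies elided by the hunks):
 *   m   = 5 * (m1 + 2) + (m2 + 2)
 *   vco = refclk * m / (n + 2)
 *   dot = vco / (p1 * p2)
 * Example: refclk = 96000 kHz, n = 4, m1 = 18, m2 = 7, p1 = 2, p2 = 5
 *   m = 109, vco = 96000 * 109 / 6 = 1744000 kHz, dot = 174400 kHz
 */
static int toy_i9xx_dot_khz(int refclk, int n, int m1, int m2, int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);
	int vco = refclk * m / (n + 2);

	return vco / (p1 * p2);
}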
@@ -642,8 +646,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
  */
 
 static bool intel_PLL_is_valid(struct drm_device *dev,
-			       const intel_limit_t *limit,
-			       const intel_clock_t *clock)
+			       const struct intel_limit *limit,
+			       const struct dpll *clock)
 {
 	if (clock->n < limit->n.min || limit->n.max < clock->n)
 		INTELPllInvalid("n out of range\n");
@@ -678,7 +682,7 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
 }
 
 static int
-i9xx_select_p2_div(const intel_limit_t *limit,
+i9xx_select_p2_div(const struct intel_limit *limit,
 		   const struct intel_crtc_state *crtc_state,
 		   int target)
 {
@@ -713,13 +717,13 @@ i9xx_select_p2_div(const intel_limit_t *limit,
  * divider from @match_clock used for LVDS downclocking.
  */
 static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
+i9xx_find_best_dpll(const struct intel_limit *limit,
 		    struct intel_crtc_state *crtc_state,
-		    int target, int refclk, intel_clock_t *match_clock,
-		    intel_clock_t *best_clock)
+		    int target, int refclk, struct dpll *match_clock,
+		    struct dpll *best_clock)
 {
 	struct drm_device *dev = crtc_state->base.crtc->dev;
-	intel_clock_t clock;
+	struct dpll clock;
 	int err = target;
 
 	memset(best_clock, 0, sizeof(*best_clock));
@@ -770,13 +774,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
  * divider from @match_clock used for LVDS downclocking.
  */
 static bool
-pnv_find_best_dpll(const intel_limit_t *limit,
+pnv_find_best_dpll(const struct intel_limit *limit,
 		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, intel_clock_t *match_clock,
-		   intel_clock_t *best_clock)
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
 {
 	struct drm_device *dev = crtc_state->base.crtc->dev;
-	intel_clock_t clock;
+	struct dpll clock;
 	int err = target;
 
 	memset(best_clock, 0, sizeof(*best_clock));
@@ -825,13 +829,13 @@ pnv_find_best_dpll(const intel_limit_t *limit,
  * divider from @match_clock used for LVDS downclocking.
  */
 static bool
-g4x_find_best_dpll(const intel_limit_t *limit,
+g4x_find_best_dpll(const struct intel_limit *limit,
 		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, intel_clock_t *match_clock,
-		   intel_clock_t *best_clock)
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
 {
 	struct drm_device *dev = crtc_state->base.crtc->dev;
-	intel_clock_t clock;
+	struct dpll clock;
 	int max_n;
 	bool found = false;
 	/* approximately equals target * 0.00585 */
@@ -877,8 +881,8 @@ g4x_find_best_dpll(const intel_limit_t *limit,
  * best configuration and error found so far. Return the calculated error.
  */
 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
-			       const intel_clock_t *calculated_clock,
-			       const intel_clock_t *best_clock,
+			       const struct dpll *calculated_clock,
+			       const struct dpll *best_clock,
 			       unsigned int best_error_ppm,
 			       unsigned int *error_ppm)
 {
@@ -918,14 +922,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  */
 static bool
-vlv_find_best_dpll(const intel_limit_t *limit,
+vlv_find_best_dpll(const struct intel_limit *limit,
 		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, intel_clock_t *match_clock,
-		   intel_clock_t *best_clock)
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
-	intel_clock_t clock;
+	struct dpll clock;
 	unsigned int bestppm = 1000000;
 	/* min update 19.2 MHz */
 	int max_n = min(limit->n.max, refclk / 19200);
@@ -977,15 +981,15 @@ vlv_find_best_dpll(const intel_limit_t *limit,
  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  */
 static bool
-chv_find_best_dpll(const intel_limit_t *limit,
+chv_find_best_dpll(const struct intel_limit *limit,
 		   struct intel_crtc_state *crtc_state,
-		   int target, int refclk, intel_clock_t *match_clock,
-		   intel_clock_t *best_clock)
+		   int target, int refclk, struct dpll *match_clock,
+		   struct dpll *best_clock)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	unsigned int best_error_ppm;
-	intel_clock_t clock;
+	struct dpll clock;
 	uint64_t m2;
 	int found = false;
 
@@ -1035,10 +1039,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
 }
 
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
-			intel_clock_t *best_clock)
+			struct dpll *best_clock)
 {
 	int refclk = 100000;
-	const intel_limit_t *limit = &intel_limits_bxt;
+	const struct intel_limit *limit = &intel_limits_bxt;
 
 	return chv_find_best_dpll(limit, crtc_state,
 				  target_clock, refclk, NULL, best_clock);
@@ -1203,7 +1207,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
 	u32 val;
 
 	/* ILK FDI PLL is always enabled */
-	if (INTEL_INFO(dev_priv)->gen == 5)
+	if (IS_GEN5(dev_priv))
 		return;
 
 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -2309,7 +2313,7 @@ err_pm:
 	return ret;
 }
 
-static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 {
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
@@ -3110,17 +3114,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		return -ENODEV;
 }
 
-static void intel_complete_page_flips(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 {
-	struct drm_crtc *crtc;
-
-	for_each_crtc(dev, crtc) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		enum plane plane = intel_crtc->plane;
+	struct intel_crtc *crtc;
 
-		intel_prepare_page_flip(dev, plane);
-		intel_finish_page_flip_plane(dev, plane);
-	}
+	for_each_intel_crtc(dev_priv->dev, crtc)
+		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
 }
 
 static void intel_update_primary_planes(struct drm_device *dev)
@@ -3143,41 +3142,39 @@ static void intel_update_primary_planes(struct drm_device *dev)
 	}
 }
 
-void intel_prepare_reset(struct drm_device *dev)
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
 {
 	/* no reset support for gen2 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return;
 
 	/* reset doesn't touch the display */
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		return;
 
-	drm_modeset_lock_all(dev);
+	drm_modeset_lock_all(dev_priv->dev);
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
 	 */
-	intel_display_suspend(dev);
+	intel_display_suspend(dev_priv->dev);
 }
 
-void intel_finish_reset(struct drm_device *dev)
+void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/*
 	 * Flips in the rings will be nuked by the reset,
 	 * so complete all pending flips so that user space
 	 * will get its events and not get stuck.
 	 */
-	intel_complete_page_flips(dev);
+	intel_complete_page_flips(dev_priv);
 
 	/* no reset support for gen2 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return;
 
 	/* reset doesn't touch the display */
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
 		/*
 		 * Flips in the rings have been nuked by the reset,
 		 * so update the base address of all primary
@@ -3187,7 +3184,7 @@ void intel_finish_reset(struct drm_device *dev)
 		 * FIXME: Atomic will make this obsolete since we won't schedule
 		 * CS-based flips (which might get lost in gpu resets) any more.
 		 */
-		intel_update_primary_planes(dev);
+		intel_update_primary_planes(dev_priv->dev);
 		return;
 	}
 
@@ -3198,18 +3195,18 @@ void intel_finish_reset(struct drm_device *dev)
 	intel_runtime_pm_disable_interrupts(dev_priv);
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
-	intel_modeset_init_hw(dev);
+	intel_modeset_init_hw(dev_priv->dev);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
+		dev_priv->display.hpd_irq_setup(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_display_resume(dev);
+	intel_display_resume(dev_priv->dev);
 
 	intel_hpd_init(dev_priv);
 
-	drm_modeset_unlock_all(dev);
+	drm_modeset_unlock_all(dev_priv->dev);
 }
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3224,7 +3221,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 		return false;
 
 	spin_lock_irq(&dev->event_lock);
-	pending = to_intel_crtc(crtc)->unpin_work != NULL;
+	pending = to_intel_crtc(crtc)->flip_work != NULL;
 	spin_unlock_irq(&dev->event_lock);
 
 	return pending;
@@ -3803,7 +3800,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 		if (atomic_read(&crtc->unpin_work_count) == 0)
 			continue;
 
-		if (crtc->unpin_work)
+		if (crtc->flip_work)
 			intel_wait_for_vblank(dev, crtc->pipe);
 
 		return true;
@@ -3815,11 +3812,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 static void page_flip_completed(struct intel_crtc *intel_crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-	struct intel_unpin_work *work = intel_crtc->unpin_work;
+	struct intel_flip_work *work = intel_crtc->flip_work;
 
-	/* ensure that the unpin work is consistent wrt ->pending. */
-	smp_rmb();
-	intel_crtc->unpin_work = NULL;
+	intel_crtc->flip_work = NULL;
 
 	if (work->event)
 		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3827,7 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
 	drm_crtc_vblank_put(&intel_crtc->base);
 
 	wake_up_all(&dev_priv->pending_flip_queue);
-	queue_work(dev_priv->wq, &work->work);
+	queue_work(dev_priv->wq, &work->unpin_work);
 
 	trace_i915_flip_complete(intel_crtc->plane,
 				 work->pending_flip_obj);
@@ -3851,9 +3846,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 
 	if (ret == 0) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct intel_flip_work *work;
 
 		spin_lock_irq(&dev->event_lock);
-		if (intel_crtc->unpin_work) {
+		work = intel_crtc->flip_work;
+		if (work && !is_mmio_work(work)) {
 			WARN_ONCE(1, "Removing stuck page flip\n");
 			page_flip_completed(intel_crtc);
 		}
@@ -4281,8 +4278,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
 
-	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
-		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+	DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
+		      intel_crtc->base.base.id, intel_crtc->base.name,
+		      intel_crtc->pipe, SKL_CRTC_INDEX);
 
 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
 		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
@@ -4312,9 +4310,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 
 	bool force_detach = !fb || !plane_state->visible;
 
-	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
-		      intel_plane->base.base.id, intel_crtc->pipe,
-		      drm_plane_index(&intel_plane->base));
+	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
+		      intel_plane->base.base.id, intel_plane->base.name,
+		      intel_crtc->pipe, drm_plane_index(&intel_plane->base));
 
 	ret = skl_update_scaler(crtc_state, force_detach,
 				drm_plane_index(&intel_plane->base),
@@ -4330,8 +4328,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 
 	/* check colorkey */
 	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
-		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
-			      intel_plane->base.base.id);
+		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
+			      intel_plane->base.base.id,
+			      intel_plane->base.name);
 		return -EINVAL;
 	}
 
@@ -4350,8 +4349,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_VYUY:
 		break;
 	default:
-		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
-			      intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+			      intel_plane->base.base.id, intel_plane->base.name,
+			      fb->base.id, fb->pixel_format);
 		return -EINVAL;
 	}
 
@@ -5269,21 +5269,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 	return max_cdclk_freq*90/100;
 }
 
+static int skl_calc_cdclk(int max_pixclk, int vco);
+
 static void intel_update_max_cdclk(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+		int max_cdclk, vco;
+
+		vco = dev_priv->skl_preferred_vco_freq;
+		WARN_ON(vco != 8100000 && vco != 8640000);
 
+		/*
+		 * Use the lower (vco 8640) cdclk values as a
+		 * first guess. skl_calc_cdclk() will correct it
+		 * if the preferred vco is 8100 instead.
+		 */
 		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
-			dev_priv->max_cdclk_freq = 675000;
+			max_cdclk = 617143;
 		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
-			dev_priv->max_cdclk_freq = 540000;
+			max_cdclk = 540000;
 		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
-			dev_priv->max_cdclk_freq = 450000;
+			max_cdclk = 432000;
 		else
-			dev_priv->max_cdclk_freq = 337500;
+			max_cdclk = 308571;
+
+		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
 	} else if (IS_BROXTON(dev)) {
 		dev_priv->max_cdclk_freq = 624000;
 	} else if (IS_BROADWELL(dev)) {
@@ -5324,264 +5337,313 @@ static void intel_update_cdclk(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
-			 dev_priv->cdclk_freq);
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
+				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
+				 dev_priv->cdclk_pll.ref);
+	else
+		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+				 dev_priv->cdclk_freq);
 
 	/*
-	 * Program the gmbus_freq based on the cdclk frequency.
-	 * BSpec erroneously claims we should aim for 4MHz, but
-	 * in fact 1MHz is the correct frequency.
+	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+	 * Programmng [sic] note: bit[9:2] should be programmed to the number
+	 * of cdclk that generates 4MHz reference clock freq which is used to
+	 * generate GMBus clock. This will vary with the cdclk freq.
 	 */
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-		/*
-		 * Program the gmbus_freq based on the cdclk frequency.
-		 * BSpec erroneously claims we should aim for 4MHz, but
-		 * in fact 1MHz is the correct frequency.
-		 */
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-	}
+}
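/*
 * Worked instance of the GMBUSFREQ write above: with
 * cdclk_freq = 320000 kHz, DIV_ROUND_UP(320000, 1000) = 320 is
 * programmed, i.e. one count per 1 MHz of cdclk; the 4 MHz wording
 * is the spec's phrasing quoted in the new comment.
 */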
 
-	if (dev_priv->max_cdclk_freq == 0)
-		intel_update_max_cdclk(dev);
+/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+static int skl_cdclk_decimal(int cdclk)
+{
+	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
 }
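/*
 * Worked through: the register field is (freq_in_MHz - 1) with one
 * fractional bit, so 337500 kHz -> DIV_ROUND_CLOSEST(336500, 500) =
 * 673 = (337.5 - 1) * 2, and 675000 kHz -> 1348 = (675 - 1) * 2;
 * the old open-coded "(frequency - 1000) / 500" computed the same.
 */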
 
-static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
+static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
 {
-	uint32_t divider;
-	uint32_t ratio;
-	uint32_t current_freq;
-	int ret;
+	int ratio;
+
+	if (cdclk == dev_priv->cdclk_pll.ref)
+		return 0;
 
-	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
-	switch (frequency) {
+	switch (cdclk) {
+	default:
+		MISSING_CASE(cdclk);
 	case 144000:
+	case 288000:
+	case 384000:
+	case 576000:
+		ratio = 60;
+		break;
+	case 624000:
+		ratio = 65;
+		break;
+	}
+
+	return dev_priv->cdclk_pll.ref * ratio;
+}
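/*
 * Numbers above worked through: with the 19200 kHz reference,
 * cdclk 624000 -> ratio 65 -> vco = 19200 * 65 = 1248000 kHz,
 * DIV_ROUND_CLOSEST(1248000, 624000) = 2 -> BXT_CDCLK_CD2X_DIV_SEL_1;
 * cdclk 144000 -> ratio 60 -> vco = 1152000 kHz, divide-by-8 ->
 * BXT_CDCLK_CD2X_DIV_SEL_4 (the cd2x clock being vco / 2).
 */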
+
+static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+
+	/* Timeout 200us */
+	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+		DRM_ERROR("timeout waiting for DE PLL unlock\n");
+
+	dev_priv->cdclk_pll.vco = 0;
+}
+
+static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
+	u32 val;
+
+	val = I915_READ(BXT_DE_PLL_CTL);
+	val &= ~BXT_DE_PLL_RATIO_MASK;
+	val |= BXT_DE_PLL_RATIO(ratio);
+	I915_WRITE(BXT_DE_PLL_CTL, val);
+
+	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+
+	/* Timeout 200us */
+	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+		DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+	dev_priv->cdclk_pll.vco = vco;
+}
+
+static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
+{
+	u32 val, divider;
+	int vco, ret;
+
+	vco = bxt_de_pll_vco(dev_priv, cdclk);
+
+	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
+
+	/* cdclk = vco / 2 / div{1,1.5,2,4} */
+	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+	case 8:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
-		ratio = BXT_DE_PLL_RATIO(60);
 		break;
-	case 288000:
+	case 4:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
-		ratio = BXT_DE_PLL_RATIO(60);
 		break;
-	case 384000:
+	case 3:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
-		ratio = BXT_DE_PLL_RATIO(60);
-		break;
-	case 576000:
-		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-		ratio = BXT_DE_PLL_RATIO(60);
 		break;
-	case 624000:
+	case 2:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-		ratio = BXT_DE_PLL_RATIO(65);
-		break;
-	case 19200:
-		/*
-		 * Bypass frequency with DE PLL disabled. Init ratio, divider
-		 * to suppress GCC warning.
-		 */
-		ratio = 0;
-		divider = 0;
 		break;
 	default:
-		DRM_ERROR("unsupported CDCLK freq %d", frequency);
+		WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
+		WARN_ON(vco != 0);
 
-		return;
+		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+		break;
 	}
 
-	mutex_lock(&dev_priv->rps.hw_lock);
 	/* Inform power controller of upcoming frequency change */
+	mutex_lock(&dev_priv->rps.hw_lock);
 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
 				      0x80000000);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (ret) {
 		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
-			  ret, frequency);
+			  ret, cdclk);
 		return;
 	}
 
-	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
-	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
-	current_freq = current_freq * 500 + 1000;
+	if (dev_priv->cdclk_pll.vco != 0 &&
+	    dev_priv->cdclk_pll.vco != vco)
+		bxt_de_pll_disable(dev_priv);
 
-	/*
-	 * DE PLL has to be disabled when
-	 * - setting to 19.2MHz (bypass, PLL isn't used)
-	 * - before setting to 624MHz (PLL needs toggling)
-	 * - before setting to any frequency from 624MHz (PLL needs toggling)
-	 */
-	if (frequency == 19200 || frequency == 624000 ||
-	    current_freq == 624000) {
-		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
-		/* Timeout 200us */
-		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
-			     1))
-			DRM_ERROR("timout waiting for DE PLL unlock\n");
-	}
-
-	if (frequency != 19200) {
-		uint32_t val;
-
-		val = I915_READ(BXT_DE_PLL_CTL);
-		val &= ~BXT_DE_PLL_RATIO_MASK;
-		val |= ratio;
-		I915_WRITE(BXT_DE_PLL_CTL, val);
-
-		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
-		/* Timeout 200us */
-		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
-			DRM_ERROR("timeout waiting for DE PLL lock\n");
-
-		val = I915_READ(CDCLK_CTL);
-		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
-		val |= divider;
-		/*
-		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
-		 * enable otherwise.
-		 */
-		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-		if (frequency >= 500000)
-			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+	if (dev_priv->cdclk_pll.vco != vco)
+		bxt_de_pll_enable(dev_priv, vco);
 
-		val &= ~CDCLK_FREQ_DECIMAL_MASK;
-		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
-		val |= (frequency - 1000) / 500;
-		I915_WRITE(CDCLK_CTL, val);
-	}
+	val = divider | skl_cdclk_decimal(cdclk);
+	/*
+	 * FIXME if only the cd2x divider needs changing, it could be done
+	 * without shutting off the pipe (if only one pipe is active).
+	 */
+	val |= BXT_CDCLK_CD2X_PIPE_NONE;
+	/*
+	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
+	 * enable otherwise.
+	 */
+	if (cdclk >= 500000)
+		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+	I915_WRITE(CDCLK_CTL, val);
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      DIV_ROUND_UP(frequency, 25000));
+				      DIV_ROUND_UP(cdclk, 25000));
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	if (ret) {
 		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
-			  ret, frequency);
+			  ret, cdclk);
 		return;
 	}
 
 	intel_update_cdclk(dev_priv->dev);
 }
 
-static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
-	if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
-		return false;
+	u32 cdctl, expected;
 
-	/* TODO: Check for a valid CDCLK rate */
+	intel_update_cdclk(dev_priv->dev);
 
-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
-		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
+	if (dev_priv->cdclk_pll.vco == 0 ||
+	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
+		goto sanitize;
 
-		return false;
-	}
+	/* DPLL okay; verify the cdclock
+	 *
+	 * Some BIOS versions leave an incorrect decimal frequency value and
+	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
+	 * so sanitize this register.
+	 */
+	cdctl = I915_READ(CDCLK_CTL);
+	/*
+	 * Let's ignore the pipe field, since BIOS could have configured the
+	 * dividers both synching to an active pipe, or asynchronously
+	 * (PIPE_NONE).
+	 */
+	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
 
-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
-		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
+	expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
+		   skl_cdclk_decimal(dev_priv->cdclk_freq);
+	/*
+	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
+	 * enable otherwise.
+	 */
+	if (dev_priv->cdclk_freq >= 500000)
+		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
 
-		return false;
-	}
+	if (cdctl == expected)
+		/* All well; nothing to sanitize */
+		return;
 
-	return true;
-}
+sanitize:
+	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
 
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
-{
-	return broxton_cdclk_is_enabled(dev_priv);
+	/* force cdclk programming */
+	dev_priv->cdclk_freq = 0;
+
+	/* force full PLL disable + enable */
+	dev_priv->cdclk_pll.vco = -1;
 }
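/*
 * The sanitize scheme above in short: rebuild the CDCLK_CTL image the
 * driver itself would have written (divider, decimal field, SSA
 * precharge), compare it with what the BIOS left behind while masking
 * the don't-care pipe-select bits, and on mismatch use cdclk_freq = 0
 * plus vco = -1 as sentinels that force a full reprogram later.
 */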
 
 void broxton_init_cdclk(struct drm_i915_private *dev_priv)
 {
-	/* check if cd clock is enabled */
-	if (broxton_cdclk_is_enabled(dev_priv)) {
-		DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
-		return;
-	}
+	bxt_sanitize_cdclk(dev_priv);
 
-	DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
+		return;
 
 	/*
 	 * FIXME:
 	 * - The initial CDCLK needs to be read from VBT.
 	 *   Need to make this change after VBT has changes for BXT.
-	 * - check if setting the max (or any) cdclk freq is really necessary
-	 *   here, it belongs to modeset time
 	 */
-	broxton_set_cdclk(dev_priv, 624000);
-
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
+	broxton_set_cdclk(dev_priv, broxton_calc_cdclk(0));
+}
 
-	udelay(10);
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+	broxton_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
+}
 
-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power enable timeout!\n");
+static int skl_calc_cdclk(int max_pixclk, int vco)
+{
+	if (vco == 8640000) {
+		if (max_pixclk > 540000)
+			return 617143;
+		else if (max_pixclk > 432000)
+			return 540000;
+		else if (max_pixclk > 308571)
+			return 432000;
+		else
+			return 308571;
+	} else {
+		if (max_pixclk > 540000)
+			return 675000;
+		else if (max_pixclk > 450000)
+			return 540000;
+		else if (max_pixclk > 337500)
+			return 450000;
+		else
+			return 337500;
+	}
 }
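/*
 * Ladder above worked through: the same pixel clock lands on a
 * different cdclk step depending on the committed DPLL0 vco, e.g.
 *   skl_calc_cdclk(430000, 8100000) == 450000 (8100000 / 18)
 *   skl_calc_cdclk(430000, 8640000) == 432000 (8640000 / 20)
 * since each step must divide down from the vco actually running.
 */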
 
-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void
+skl_dpll0_update(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
+	u32 val;
 
-	udelay(10);
+	dev_priv->cdclk_pll.ref = 24000;
+	dev_priv->cdclk_pll.vco = 0;
 
-	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
-		DRM_ERROR("DBuf power disable timeout!\n");
+	val = I915_READ(LCPLL1_CTL);
+	if ((val & LCPLL_PLL_ENABLE) == 0)
+		return;
 
-	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
-	broxton_set_cdclk(dev_priv, 19200);
-}
+	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+		return;
 
-static const struct skl_cdclk_entry {
-	unsigned int freq;
-	unsigned int vco;
-} skl_cdclk_frequencies[] = {
-	{ .freq = 308570, .vco = 8640 },
-	{ .freq = 337500, .vco = 8100 },
-	{ .freq = 432000, .vco = 8640 },
-	{ .freq = 450000, .vco = 8100 },
-	{ .freq = 540000, .vco = 8100 },
-	{ .freq = 617140, .vco = 8640 },
-	{ .freq = 675000, .vco = 8100 },
-};
+	val = I915_READ(DPLL_CTRL1);
 
-static unsigned int skl_cdclk_decimal(unsigned int freq)
-{
-	return (freq - 1000) / 500;
+	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+		    DPLL_CTRL1_SSC(SKL_DPLL0) |
+		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+		return;
+
+	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
+		dev_priv->cdclk_pll.vco = 8100000;
+		break;
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
+	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
+		dev_priv->cdclk_pll.vco = 8640000;
+		break;
+	default:
+		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+		break;
+	}
 }
 
-static unsigned int skl_cdclk_get_vco(unsigned int freq)
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
 {
-	unsigned int i;
+	bool changed = dev_priv->skl_preferred_vco_freq != vco;
 
-	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
-		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
-
-		if (e->freq == freq)
-			return e->vco;
-	}
+	dev_priv->skl_preferred_vco_freq = vco;
 
-	return 8100;
+	if (changed)
+		intel_update_max_cdclk(dev_priv->dev);
 }
 
 static void
-skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
+skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 {
-	unsigned int min_freq;
+	int min_cdclk = skl_calc_cdclk(0, vco);
 	u32 val;
 
-	/* select the minimum CDCLK before enabling DPLL 0 */
-	val = I915_READ(CDCLK_CTL);
-	val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
-	val |= CDCLK_FREQ_337_308;
-
-	if (required_vco == 8640)
-		min_freq = 308570;
-	else
-		min_freq = 337500;
-
-	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
+	WARN_ON(vco != 8100000 && vco != 8640000);
 
+	/* select the minimum CDCLK before enabling DPLL 0 */
+	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
 	I915_WRITE(CDCLK_CTL, val);
 	POSTING_READ(CDCLK_CTL);
 
@@ -5592,14 +5654,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
 	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
 	 * The modeset code is responsible for the selection of the exact link
 	 * rate later on, with the constraint of choosing a frequency that
-	 * works with required_vco.
+	 * works with vco.
 	 */
 	val = I915_READ(DPLL_CTRL1);
 
 	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
 		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
 	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-	if (required_vco == 8640)
+	if (vco == 8640000)
 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
 					    SKL_DPLL0);
 	else
@@ -5613,6 +5675,21 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5613 5675
5614 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) 5676 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5615 DRM_ERROR("DPLL0 not locked\n"); 5677 DRM_ERROR("DPLL0 not locked\n");
5678
5679 dev_priv->cdclk_pll.vco = vco;
5680
5681 /* We'll want to keep using the current vco from now on. */
5682 skl_set_preferred_cdclk_vco(dev_priv, vco);
5683}
5684
5685static void
5686skl_dpll0_disable(struct drm_i915_private *dev_priv)
5687{
5688 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5689 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5690 DRM_ERROR("Couldn't disable DPLL0\n");
5691
5692 dev_priv->cdclk_pll.vco = 0;
5616} 5693}
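
[Reviewer note] skl_cdclk_decimal(), used above to program CDCLK_CTL, is assumed to encode cdclk in the hardware's 11.1 fixed-point "MHz minus one" format; a hedged sketch with a couple of spot checks:

#include <assert.h>

/* Assumption: the CDCLK_CTL decimal field is (cdclk_MHz - 1) in 11.1
 * fixed point, i.e. DIV_ROUND_CLOSEST(cdclk_khz - 1000, 500). */
static int sketch_skl_cdclk_decimal(int cdclk_khz)
{
	return (cdclk_khz - 1000 + 250) / 500;
}

int main(void)
{
	assert(sketch_skl_cdclk_decimal(337500) == 673);	/* 336.5 MHz */
	assert(sketch_skl_cdclk_decimal(308571) == 615);	/* 307.5 MHz */
	return 0;
}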
5617 5694
5618static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) 5695static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
@@ -5642,12 +5719,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5642 return false; 5719 return false;
5643} 5720}
5644 5721
5645static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) 5722static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
5646{ 5723{
5647 struct drm_device *dev = dev_priv->dev; 5724 struct drm_device *dev = dev_priv->dev;
5648 u32 freq_select, pcu_ack; 5725 u32 freq_select, pcu_ack;
5649 5726
5650 DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); 5727 WARN_ON((cdclk == 24000) != (vco == 0));
5728
5729 DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
5651 5730
5652 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { 5731 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5653 DRM_ERROR("failed to inform PCU about cdclk change\n"); 5732 DRM_ERROR("failed to inform PCU about cdclk change\n");
@@ -5655,7 +5734,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5655 } 5734 }
5656 5735
5657 /* set CDCLK_CTL */ 5736 /* set CDCLK_CTL */
5658 switch(freq) { 5737 switch (cdclk) {
5659 case 450000: 5738 case 450000:
5660 case 432000: 5739 case 432000:
5661 freq_select = CDCLK_FREQ_450_432; 5740 freq_select = CDCLK_FREQ_450_432;
@@ -5665,20 +5744,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5665 freq_select = CDCLK_FREQ_540; 5744 freq_select = CDCLK_FREQ_540;
5666 pcu_ack = 2; 5745 pcu_ack = 2;
5667 break; 5746 break;
5668 case 308570: 5747 case 308571:
5669 case 337500: 5748 case 337500:
5670 default: 5749 default:
5671 freq_select = CDCLK_FREQ_337_308; 5750 freq_select = CDCLK_FREQ_337_308;
5672 pcu_ack = 0; 5751 pcu_ack = 0;
5673 break; 5752 break;
5674 case 617140: 5753 case 617143:
5675 case 675000: 5754 case 675000:
5676 freq_select = CDCLK_FREQ_675_617; 5755 freq_select = CDCLK_FREQ_675_617;
5677 pcu_ack = 3; 5756 pcu_ack = 3;
5678 break; 5757 break;
5679 } 5758 }
5680 5759
5681 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq)); 5760 if (dev_priv->cdclk_pll.vco != 0 &&
5761 dev_priv->cdclk_pll.vco != vco)
5762 skl_dpll0_disable(dev_priv);
5763
5764 if (dev_priv->cdclk_pll.vco != vco)
5765 skl_dpll0_enable(dev_priv, vco);
5766
5767 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
5682 POSTING_READ(CDCLK_CTL); 5768 POSTING_READ(CDCLK_CTL);
5683 5769
5684 /* inform PCU of the change */ 5770 /* inform PCU of the change */
@@ -5689,52 +5775,41 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5689 intel_update_cdclk(dev); 5775 intel_update_cdclk(dev);
5690} 5776}
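
[Reviewer note] The cdclk -> (freq_select, pcu_ack) pairs handled by the switch in skl_set_cdclk(), gathered in one place for reference (the pcu_ack value for the 450/432 MHz case is elided from this hunk and therefore not listed):

/*
 *	cdclk (kHz)		freq_select		pcu_ack
 *	308571 / 337500		CDCLK_FREQ_337_308	0
 *	540000			CDCLK_FREQ_540		2
 *	617143 / 675000		CDCLK_FREQ_675_617	3
 *	450000 / 432000		CDCLK_FREQ_450_432	(outside this hunk)
 */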
5691 5777
5778static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
5779
5692void skl_uninit_cdclk(struct drm_i915_private *dev_priv) 5780void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5693{ 5781{
5694 /* disable DBUF power */ 5782 skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
5695 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5696 POSTING_READ(DBUF_CTL);
5697
5698 udelay(10);
5699
5700 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5701 DRM_ERROR("DBuf power disable timeout\n");
5702
5703 /* disable DPLL0 */
5704 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5705 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5706 DRM_ERROR("Couldn't disable DPLL0\n");
5707} 5783}
5708 5784
5709void skl_init_cdclk(struct drm_i915_private *dev_priv) 5785void skl_init_cdclk(struct drm_i915_private *dev_priv)
5710{ 5786{
5711 unsigned int required_vco; 5787 int cdclk, vco;
5712
5713 /* DPLL0 not enabled (happens on early BIOS versions) */
5714 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5715 /* enable DPLL0 */
5716 required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5717 skl_dpll0_enable(dev_priv, required_vco);
5718 }
5719 5788
5720 /* set CDCLK to the frequency the BIOS chose */ 5789 skl_sanitize_cdclk(dev_priv);
5721 skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5722 5790
5723 /* enable DBUF power */ 5791 if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
5724 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); 5792 /*
5725 POSTING_READ(DBUF_CTL); 5793 * Use the current vco as our initial
5794 * guess as to what the preferred vco is.
5795 */
5796 if (dev_priv->skl_preferred_vco_freq == 0)
5797 skl_set_preferred_cdclk_vco(dev_priv,
5798 dev_priv->cdclk_pll.vco);
5799 return;
5800 }
5726 5801
5727 udelay(10); 5802 vco = dev_priv->skl_preferred_vco_freq;
5803 if (vco == 0)
5804 vco = 8100000;
5805 cdclk = skl_calc_cdclk(0, vco);
5728 5806
5729 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5807 skl_set_cdclk(dev_priv, cdclk, vco);
5730 DRM_ERROR("DBuf power enable timeout\n");
5731} 5808}
5732 5809
5733int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) 5810static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5734{ 5811{
5735 uint32_t lcpll1 = I915_READ(LCPLL1_CTL); 5812 uint32_t cdctl, expected;
5736 uint32_t cdctl = I915_READ(CDCLK_CTL);
5737 int freq = dev_priv->skl_boot_cdclk;
5738 5813
5739 /* 5814 /*
 5740	 * check if the pre-os initialized the display 5815
@@ -5744,8 +5819,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5744 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) 5819 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5745 goto sanitize; 5820 goto sanitize;
5746 5821
5822 intel_update_cdclk(dev_priv->dev);
5747 /* Is PLL enabled and locked ? */ 5823 /* Is PLL enabled and locked ? */
5748 if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK))) 5824 if (dev_priv->cdclk_pll.vco == 0 ||
5825 dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5749 goto sanitize; 5826 goto sanitize;
5750 5827
 5751	/* DPLL okay; verify the cdclk 5828	/* DPLL okay; verify the cdclk
@@ -5754,19 +5831,20 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 5754	 * decimal part may be programmed wrong by the BIOS when the pre-os does not 5831	 * decimal part may be programmed wrong by the BIOS when the pre-os does not
 5755	 * enable the display. Verify that as well. 5832	 * enable the display. Verify that as well.
5756 */ 5833 */
5757 if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq))) 5834 cdctl = I915_READ(CDCLK_CTL);
5835 expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
5836 skl_cdclk_decimal(dev_priv->cdclk_freq);
5837 if (cdctl == expected)
5758 /* All well; nothing to sanitize */ 5838 /* All well; nothing to sanitize */
5759 return false; 5839 return;
5840
5760sanitize: 5841sanitize:
5761 /* 5842 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5762 * As of now initialize with max cdclk till
5763 * we get dynamic cdclk support
5764 * */
5765 dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5766 skl_init_cdclk(dev_priv);
5767 5843
5768 /* we did have to sanitize */ 5844 /* force cdclk programming */
5769 return true; 5845 dev_priv->cdclk_freq = 0;
5846 /* force full PLL disable + enable */
5847 dev_priv->cdclk_pll.vco = -1;
5770} 5848}
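
[Reviewer note] The two sentinel stores at the end of skl_sanitize_cdclk() work against the checks added to skl_set_cdclk() above; a short walkthrough of why vco = -1 forces a full PLL cycle:

/*
 * With dev_priv->cdclk_pll.vco == -1, in skl_set_cdclk():
 *
 *	if (dev_priv->cdclk_pll.vco != 0 &&	// -1 != 0: true
 *	    dev_priv->cdclk_pll.vco != vco)	// -1 != any valid vco: true
 *		skl_dpll0_disable(dev_priv);	// always runs
 *
 *	if (dev_priv->cdclk_pll.vco != vco)	// now 0 != vco: true
 *		skl_dpll0_enable(dev_priv, vco);// always runs
 *
 * and cdclk_freq == 0 guarantees the next skl_set_cdclk() sees a
 * frequency mismatch and reprograms CDCLK_CTL.
 */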
5771 5849
5772/* Adjust CDclk dividers to allow high res or save power if possible */ 5850/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -5906,21 +5984,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5906 return 200000; 5984 return 200000;
5907} 5985}
5908 5986
5909static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, 5987static int broxton_calc_cdclk(int max_pixclk)
5910 int max_pixclk)
5911{ 5988{
5912 /* 5989 if (max_pixclk > 576000)
5913 * FIXME:
5914 * - remove the guardband, it's not needed on BXT
5915 * - set 19.2MHz bypass frequency if there are no active pipes
5916 */
5917 if (max_pixclk > 576000*9/10)
5918 return 624000; 5990 return 624000;
5919 else if (max_pixclk > 384000*9/10) 5991 else if (max_pixclk > 384000)
5920 return 576000; 5992 return 576000;
5921 else if (max_pixclk > 288000*9/10) 5993 else if (max_pixclk > 288000)
5922 return 384000; 5994 return 384000;
5923 else if (max_pixclk > 144000*9/10) 5995 else if (max_pixclk > 144000)
5924 return 288000; 5996 return 288000;
5925 else 5997 else
5926 return 144000; 5998 return 144000;
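
[Reviewer note] A standalone check that removing the 9/10 guardband changes behavior near the thresholds (sketch; the ladder is copied from the new broxton_calc_cdclk() above):

#include <assert.h>

static int broxton_calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 576000)
		return 624000;
	else if (max_pixclk > 384000)
		return 576000;
	else if (max_pixclk > 288000)
		return 384000;
	else if (max_pixclk > 144000)
		return 288000;
	else
		return 144000;
}

int main(void)
{
	/* The old code compared against 576000*9/10 = 518400, so a
	 * 550 MHz pixel clock used to force 624 MHz cdclk; with the
	 * guardband gone it fits under 576 MHz. */
	assert(broxton_calc_cdclk(550000) == 576000);
	assert(broxton_calc_cdclk(0) == 144000);
	return 0;
}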
@@ -5963,9 +6035,6 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5963 struct intel_atomic_state *intel_state = 6035 struct intel_atomic_state *intel_state =
5964 to_intel_atomic_state(state); 6036 to_intel_atomic_state(state);
5965 6037
5966 if (max_pixclk < 0)
5967 return max_pixclk;
5968
5969 intel_state->cdclk = intel_state->dev_cdclk = 6038 intel_state->cdclk = intel_state->dev_cdclk =
5970 valleyview_calc_cdclk(dev_priv, max_pixclk); 6039 valleyview_calc_cdclk(dev_priv, max_pixclk);
5971 6040
@@ -5977,20 +6046,15 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5977 6046
5978static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) 6047static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
5979{ 6048{
5980 struct drm_device *dev = state->dev; 6049 int max_pixclk = ilk_max_pixel_rate(state);
5981 struct drm_i915_private *dev_priv = dev->dev_private;
5982 int max_pixclk = intel_mode_max_pixclk(dev, state);
5983 struct intel_atomic_state *intel_state = 6050 struct intel_atomic_state *intel_state =
5984 to_intel_atomic_state(state); 6051 to_intel_atomic_state(state);
5985 6052
5986 if (max_pixclk < 0)
5987 return max_pixclk;
5988
5989 intel_state->cdclk = intel_state->dev_cdclk = 6053 intel_state->cdclk = intel_state->dev_cdclk =
5990 broxton_calc_cdclk(dev_priv, max_pixclk); 6054 broxton_calc_cdclk(max_pixclk);
5991 6055
5992 if (!intel_state->active_crtcs) 6056 if (!intel_state->active_crtcs)
5993 intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); 6057 intel_state->dev_cdclk = broxton_calc_cdclk(0);
5994 6058
5995 return 0; 6059 return 0;
5996} 6060}
@@ -6252,7 +6316,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6252 return; 6316 return;
6253 6317
6254 if (to_intel_plane_state(crtc->primary->state)->visible) { 6318 if (to_intel_plane_state(crtc->primary->state)->visible) {
6255 WARN_ON(intel_crtc->unpin_work); 6319 WARN_ON(intel_crtc->flip_work);
6256 6320
6257 intel_pre_disable_primary_noatomic(crtc); 6321 intel_pre_disable_primary_noatomic(crtc);
6258 6322
@@ -6262,8 +6326,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6262 6326
6263 dev_priv->display.crtc_disable(crtc); 6327 dev_priv->display.crtc_disable(crtc);
6264 6328
6265 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n", 6329 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6266 crtc->base.id); 6330 crtc->base.id, crtc->name);
6267 6331
6268 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); 6332 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6269 crtc->state->active = false; 6333 crtc->state->active = false;
@@ -6563,10 +6627,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6563 struct drm_device *dev = crtc->base.dev; 6627 struct drm_device *dev = crtc->base.dev;
6564 struct drm_i915_private *dev_priv = dev->dev_private; 6628 struct drm_i915_private *dev_priv = dev->dev_private;
6565 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6629 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6630 int clock_limit = dev_priv->max_dotclk_freq;
6566 6631
6567 /* FIXME should check pixel clock limits on all platforms */
6568 if (INTEL_INFO(dev)->gen < 4) { 6632 if (INTEL_INFO(dev)->gen < 4) {
6569 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6633 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6570 6634
6571 /* 6635 /*
6572 * Enable double wide mode when the dot clock 6636 * Enable double wide mode when the dot clock
@@ -6574,16 +6638,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6574 */ 6638 */
6575 if (intel_crtc_supports_double_wide(crtc) && 6639 if (intel_crtc_supports_double_wide(crtc) &&
6576 adjusted_mode->crtc_clock > clock_limit) { 6640 adjusted_mode->crtc_clock > clock_limit) {
6577 clock_limit *= 2; 6641 clock_limit = dev_priv->max_dotclk_freq;
6578 pipe_config->double_wide = true; 6642 pipe_config->double_wide = true;
6579 } 6643 }
6644 }
6580 6645
6581 if (adjusted_mode->crtc_clock > clock_limit) { 6646 if (adjusted_mode->crtc_clock > clock_limit) {
6582 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6647 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6583 adjusted_mode->crtc_clock, clock_limit, 6648 adjusted_mode->crtc_clock, clock_limit,
6584 yesno(pipe_config->double_wide)); 6649 yesno(pipe_config->double_wide));
6585 return -EINVAL; 6650 return -EINVAL;
6586 }
6587 } 6651 }
6588 6652
6589 /* 6653 /*
@@ -6615,76 +6679,98 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6615static int skylake_get_display_clock_speed(struct drm_device *dev) 6679static int skylake_get_display_clock_speed(struct drm_device *dev)
6616{ 6680{
6617 struct drm_i915_private *dev_priv = to_i915(dev); 6681 struct drm_i915_private *dev_priv = to_i915(dev);
6618 uint32_t lcpll1 = I915_READ(LCPLL1_CTL); 6682 uint32_t cdctl;
6619 uint32_t cdctl = I915_READ(CDCLK_CTL);
6620 uint32_t linkrate;
6621 6683
6622 if (!(lcpll1 & LCPLL_PLL_ENABLE)) 6684 skl_dpll0_update(dev_priv);
6623 return 24000; /* 24MHz is the cd freq with NSSC ref */
6624 6685
6625 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) 6686 if (dev_priv->cdclk_pll.vco == 0)
6626 return 540000; 6687 return dev_priv->cdclk_pll.ref;
6627 6688
6628 linkrate = (I915_READ(DPLL_CTRL1) & 6689 cdctl = I915_READ(CDCLK_CTL);
6629 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6630 6690
6631 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || 6691 if (dev_priv->cdclk_pll.vco == 8640000) {
6632 linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6633 /* vco 8640 */
6634 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6692 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6635 case CDCLK_FREQ_450_432: 6693 case CDCLK_FREQ_450_432:
6636 return 432000; 6694 return 432000;
6637 case CDCLK_FREQ_337_308: 6695 case CDCLK_FREQ_337_308:
6638 return 308570; 6696 return 308571;
6697 case CDCLK_FREQ_540:
6698 return 540000;
6639 case CDCLK_FREQ_675_617: 6699 case CDCLK_FREQ_675_617:
6640 return 617140; 6700 return 617143;
6641 default: 6701 default:
6642 WARN(1, "Unknown cd freq selection\n"); 6702 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6643 } 6703 }
6644 } else { 6704 } else {
6645 /* vco 8100 */
6646 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6705 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6647 case CDCLK_FREQ_450_432: 6706 case CDCLK_FREQ_450_432:
6648 return 450000; 6707 return 450000;
6649 case CDCLK_FREQ_337_308: 6708 case CDCLK_FREQ_337_308:
6650 return 337500; 6709 return 337500;
6710 case CDCLK_FREQ_540:
6711 return 540000;
6651 case CDCLK_FREQ_675_617: 6712 case CDCLK_FREQ_675_617:
6652 return 675000; 6713 return 675000;
6653 default: 6714 default:
6654 WARN(1, "Unknown cd freq selection\n"); 6715 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6655 } 6716 }
6656 } 6717 }
6657 6718
6658 /* error case, do as if DPLL0 isn't enabled */ 6719 return dev_priv->cdclk_pll.ref;
6659 return 24000; 6720}
6721
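
[Reviewer note] The 308570 -> 308571 and 617140 -> 617143 changes here (and in skl_set_cdclk() above) follow from rounding the vco division to the closest kHz instead of truncating; the divisors below are assumed from the 8640 MHz vco, not shown in this hunk:

/*
 *	DIV_ROUND_CLOSEST(8640000, 28) = 308571   (8640000/28 = 308571.43)
 *	DIV_ROUND_CLOSEST(8640000, 14) = 617143   (8640000/14 = 617142.86)
 */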
6722static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
6723{
6724 u32 val;
6725
6726 dev_priv->cdclk_pll.ref = 19200;
6727 dev_priv->cdclk_pll.vco = 0;
6728
6729 val = I915_READ(BXT_DE_PLL_ENABLE);
6730 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
6731 return;
6732
6733 if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
6734 return;
6735
6736 val = I915_READ(BXT_DE_PLL_CTL);
6737 dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
6738 dev_priv->cdclk_pll.ref;
6660} 6739}
6661 6740
6662static int broxton_get_display_clock_speed(struct drm_device *dev) 6741static int broxton_get_display_clock_speed(struct drm_device *dev)
6663{ 6742{
6664 struct drm_i915_private *dev_priv = to_i915(dev); 6743 struct drm_i915_private *dev_priv = to_i915(dev);
6665 uint32_t cdctl = I915_READ(CDCLK_CTL); 6744 u32 divider;
6666 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; 6745 int div, vco;
6667 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6668 int cdclk;
6669 6746
6670 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) 6747 bxt_de_pll_update(dev_priv);
6671 return 19200;
6672 6748
6673 cdclk = 19200 * pll_ratio / 2; 6749 vco = dev_priv->cdclk_pll.vco;
6750 if (vco == 0)
6751 return dev_priv->cdclk_pll.ref;
6752
6753 divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
6674 6754
6675 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { 6755 switch (divider) {
6676 case BXT_CDCLK_CD2X_DIV_SEL_1: 6756 case BXT_CDCLK_CD2X_DIV_SEL_1:
6677 return cdclk; /* 576MHz or 624MHz */ 6757 div = 2;
6758 break;
6678 case BXT_CDCLK_CD2X_DIV_SEL_1_5: 6759 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6679 return cdclk * 2 / 3; /* 384MHz */ 6760 div = 3;
6761 break;
6680 case BXT_CDCLK_CD2X_DIV_SEL_2: 6762 case BXT_CDCLK_CD2X_DIV_SEL_2:
6681 return cdclk / 2; /* 288MHz */ 6763 div = 4;
6764 break;
6682 case BXT_CDCLK_CD2X_DIV_SEL_4: 6765 case BXT_CDCLK_CD2X_DIV_SEL_4:
6683 return cdclk / 4; /* 144MHz */ 6766 div = 8;
6767 break;
6768 default:
6769 MISSING_CASE(divider);
6770 return dev_priv->cdclk_pll.ref;
6684 } 6771 }
6685 6772
6686 /* error case, do as if DE PLL isn't enabled */ 6773 return DIV_ROUND_CLOSEST(vco, div);
6687 return 19200;
6688} 6774}
6689 6775
6690static int broadwell_get_display_clock_speed(struct drm_device *dev) 6776static int broadwell_get_display_clock_speed(struct drm_device *dev)
@@ -7063,7 +7149,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7063 7149
7064static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7150static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7065 struct intel_crtc_state *crtc_state, 7151 struct intel_crtc_state *crtc_state,
7066 intel_clock_t *reduced_clock) 7152 struct dpll *reduced_clock)
7067{ 7153{
7068 struct drm_device *dev = crtc->base.dev; 7154 struct drm_device *dev = crtc->base.dev;
7069 u32 fp, fp2 = 0; 7155 u32 fp, fp2 = 0;
@@ -7487,7 +7573,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7487 7573
7488static void i9xx_compute_dpll(struct intel_crtc *crtc, 7574static void i9xx_compute_dpll(struct intel_crtc *crtc,
7489 struct intel_crtc_state *crtc_state, 7575 struct intel_crtc_state *crtc_state,
7490 intel_clock_t *reduced_clock) 7576 struct dpll *reduced_clock)
7491{ 7577{
7492 struct drm_device *dev = crtc->base.dev; 7578 struct drm_device *dev = crtc->base.dev;
7493 struct drm_i915_private *dev_priv = dev->dev_private; 7579 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7563,7 +7649,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
7563 7649
7564static void i8xx_compute_dpll(struct intel_crtc *crtc, 7650static void i8xx_compute_dpll(struct intel_crtc *crtc,
7565 struct intel_crtc_state *crtc_state, 7651 struct intel_crtc_state *crtc_state,
7566 intel_clock_t *reduced_clock) 7652 struct dpll *reduced_clock)
7567{ 7653{
7568 struct drm_device *dev = crtc->base.dev; 7654 struct drm_device *dev = crtc->base.dev;
7569 struct drm_i915_private *dev_priv = dev->dev_private; 7655 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7817,7 +7903,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7817{ 7903{
7818 struct drm_device *dev = crtc->base.dev; 7904 struct drm_device *dev = crtc->base.dev;
7819 struct drm_i915_private *dev_priv = dev->dev_private; 7905 struct drm_i915_private *dev_priv = dev->dev_private;
7820 const intel_limit_t *limit; 7906 const struct intel_limit *limit;
7821 int refclk = 48000; 7907 int refclk = 48000;
7822 7908
7823 memset(&crtc_state->dpll_hw_state, 0, 7909 memset(&crtc_state->dpll_hw_state, 0,
@@ -7853,7 +7939,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7853{ 7939{
7854 struct drm_device *dev = crtc->base.dev; 7940 struct drm_device *dev = crtc->base.dev;
7855 struct drm_i915_private *dev_priv = dev->dev_private; 7941 struct drm_i915_private *dev_priv = dev->dev_private;
7856 const intel_limit_t *limit; 7942 const struct intel_limit *limit;
7857 int refclk = 96000; 7943 int refclk = 96000;
7858 7944
7859 memset(&crtc_state->dpll_hw_state, 0, 7945 memset(&crtc_state->dpll_hw_state, 0,
@@ -7896,7 +7982,7 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7896{ 7982{
7897 struct drm_device *dev = crtc->base.dev; 7983 struct drm_device *dev = crtc->base.dev;
7898 struct drm_i915_private *dev_priv = dev->dev_private; 7984 struct drm_i915_private *dev_priv = dev->dev_private;
7899 const intel_limit_t *limit; 7985 const struct intel_limit *limit;
7900 int refclk = 96000; 7986 int refclk = 96000;
7901 7987
7902 memset(&crtc_state->dpll_hw_state, 0, 7988 memset(&crtc_state->dpll_hw_state, 0,
@@ -7930,7 +8016,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7930{ 8016{
7931 struct drm_device *dev = crtc->base.dev; 8017 struct drm_device *dev = crtc->base.dev;
7932 struct drm_i915_private *dev_priv = dev->dev_private; 8018 struct drm_i915_private *dev_priv = dev->dev_private;
7933 const intel_limit_t *limit; 8019 const struct intel_limit *limit;
7934 int refclk = 96000; 8020 int refclk = 96000;
7935 8021
7936 memset(&crtc_state->dpll_hw_state, 0, 8022 memset(&crtc_state->dpll_hw_state, 0,
@@ -7963,7 +8049,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7963 struct intel_crtc_state *crtc_state) 8049 struct intel_crtc_state *crtc_state)
7964{ 8050{
7965 int refclk = 100000; 8051 int refclk = 100000;
7966 const intel_limit_t *limit = &intel_limits_chv; 8052 const struct intel_limit *limit = &intel_limits_chv;
7967 8053
7968 memset(&crtc_state->dpll_hw_state, 0, 8054 memset(&crtc_state->dpll_hw_state, 0,
7969 sizeof(crtc_state->dpll_hw_state)); 8055 sizeof(crtc_state->dpll_hw_state));
@@ -7984,7 +8070,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7984 struct intel_crtc_state *crtc_state) 8070 struct intel_crtc_state *crtc_state)
7985{ 8071{
7986 int refclk = 100000; 8072 int refclk = 100000;
7987 const intel_limit_t *limit = &intel_limits_vlv; 8073 const struct intel_limit *limit = &intel_limits_vlv;
7988 8074
7989 memset(&crtc_state->dpll_hw_state, 0, 8075 memset(&crtc_state->dpll_hw_state, 0,
7990 sizeof(crtc_state->dpll_hw_state)); 8076 sizeof(crtc_state->dpll_hw_state));
@@ -8034,7 +8120,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8034 struct drm_device *dev = crtc->base.dev; 8120 struct drm_device *dev = crtc->base.dev;
8035 struct drm_i915_private *dev_priv = dev->dev_private; 8121 struct drm_i915_private *dev_priv = dev->dev_private;
8036 int pipe = pipe_config->cpu_transcoder; 8122 int pipe = pipe_config->cpu_transcoder;
8037 intel_clock_t clock; 8123 struct dpll clock;
8038 u32 mdiv; 8124 u32 mdiv;
8039 int refclk = 100000; 8125 int refclk = 100000;
8040 8126
@@ -8131,7 +8217,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
8131 struct drm_i915_private *dev_priv = dev->dev_private; 8217 struct drm_i915_private *dev_priv = dev->dev_private;
8132 int pipe = pipe_config->cpu_transcoder; 8218 int pipe = pipe_config->cpu_transcoder;
8133 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8219 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8134 intel_clock_t clock; 8220 struct dpll clock;
8135 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8221 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8136 int refclk = 100000; 8222 int refclk = 100000;
8137 8223
@@ -8275,12 +8361,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8275{ 8361{
8276 struct drm_i915_private *dev_priv = dev->dev_private; 8362 struct drm_i915_private *dev_priv = dev->dev_private;
8277 struct intel_encoder *encoder; 8363 struct intel_encoder *encoder;
8364 int i;
8278 u32 val, final; 8365 u32 val, final;
8279 bool has_lvds = false; 8366 bool has_lvds = false;
8280 bool has_cpu_edp = false; 8367 bool has_cpu_edp = false;
8281 bool has_panel = false; 8368 bool has_panel = false;
8282 bool has_ck505 = false; 8369 bool has_ck505 = false;
8283 bool can_ssc = false; 8370 bool can_ssc = false;
8371 bool using_ssc_source = false;
8284 8372
8285 /* We need to take the global config into account */ 8373 /* We need to take the global config into account */
8286 for_each_intel_encoder(dev, encoder) { 8374 for_each_intel_encoder(dev, encoder) {
@@ -8307,8 +8395,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8307 can_ssc = true; 8395 can_ssc = true;
8308 } 8396 }
8309 8397
8310 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", 8398 /* Check if any DPLLs are using the SSC source */
8311 has_panel, has_lvds, has_ck505); 8399 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8400 u32 temp = I915_READ(PCH_DPLL(i));
8401
8402 if (!(temp & DPLL_VCO_ENABLE))
8403 continue;
8404
8405 if ((temp & PLL_REF_INPUT_MASK) ==
8406 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8407 using_ssc_source = true;
8408 break;
8409 }
8410 }
8411
8412 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8413 has_panel, has_lvds, has_ck505, using_ssc_source);
8312 8414
8313 /* Ironlake: try to setup display ref clock before DPLL 8415 /* Ironlake: try to setup display ref clock before DPLL
8314 * enabling. This is only under driver's control after 8416 * enabling. This is only under driver's control after
@@ -8328,9 +8430,12 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8328 else 8430 else
8329 final |= DREF_NONSPREAD_SOURCE_ENABLE; 8431 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8330 8432
8331 final &= ~DREF_SSC_SOURCE_MASK;
8332 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8433 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8333 final &= ~DREF_SSC1_ENABLE; 8434
8435 if (!using_ssc_source) {
8436 final &= ~DREF_SSC_SOURCE_MASK;
8437 final &= ~DREF_SSC1_ENABLE;
8438 }
8334 8439
8335 if (has_panel) { 8440 if (has_panel) {
8336 final |= DREF_SSC_SOURCE_ENABLE; 8441 final |= DREF_SSC_SOURCE_ENABLE;
@@ -8393,7 +8498,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8393 POSTING_READ(PCH_DREF_CONTROL); 8498 POSTING_READ(PCH_DREF_CONTROL);
8394 udelay(200); 8499 udelay(200);
8395 } else { 8500 } else {
8396 DRM_DEBUG_KMS("Disabling SSC entirely\n"); 8501 DRM_DEBUG_KMS("Disabling CPU source output\n");
8397 8502
8398 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8503 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8399 8504
@@ -8404,16 +8509,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8404 POSTING_READ(PCH_DREF_CONTROL); 8509 POSTING_READ(PCH_DREF_CONTROL);
8405 udelay(200); 8510 udelay(200);
8406 8511
8407 /* Turn off the SSC source */ 8512 if (!using_ssc_source) {
8408 val &= ~DREF_SSC_SOURCE_MASK; 8513 DRM_DEBUG_KMS("Disabling SSC source\n");
8409 val |= DREF_SSC_SOURCE_DISABLE;
8410 8514
8411 /* Turn off SSC1 */ 8515 /* Turn off the SSC source */
8412 val &= ~DREF_SSC1_ENABLE; 8516 val &= ~DREF_SSC_SOURCE_MASK;
8517 val |= DREF_SSC_SOURCE_DISABLE;
8413 8518
8414 I915_WRITE(PCH_DREF_CONTROL, val); 8519 /* Turn off SSC1 */
8415 POSTING_READ(PCH_DREF_CONTROL); 8520 val &= ~DREF_SSC1_ENABLE;
8416 udelay(200); 8521
8522 I915_WRITE(PCH_DREF_CONTROL, val);
8523 POSTING_READ(PCH_DREF_CONTROL);
8524 udelay(200);
8525 }
8417 } 8526 }
8418 8527
8419 BUG_ON(val != final); 8528 BUG_ON(val != final);
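
[Reviewer note] A standalone sketch of the new "is any PCH DPLL consuming the SSC reference?" test added above. The mask values are assumed from i915_reg.h and are not part of this hunk:

#include <stdbool.h>
#include <stdint.h>

#define DPLL_VCO_ENABLE			(1u << 31)	/* assumed */
#define PLL_REF_INPUT_MASK		(3u << 13)	/* assumed */
#define PLLB_REF_INPUT_SPREADSPECTRUMIN	(3u << 13)	/* assumed */

/* A DPLL pins the SSC source only if it is enabled and its reference
 * input selects spread spectrum; if any such PLL exists, the SSC
 * source must be left running. */
static bool sketch_dpll_uses_ssc(uint32_t pch_dpll)
{
	return (pch_dpll & DPLL_VCO_ENABLE) &&
	       (pch_dpll & PLL_REF_INPUT_MASK) ==
	       PLLB_REF_INPUT_SPREADSPECTRUMIN;
}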
@@ -8794,7 +8903,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8794 8903
8795static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, 8904static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8796 struct intel_crtc_state *crtc_state, 8905 struct intel_crtc_state *crtc_state,
8797 intel_clock_t *reduced_clock) 8906 struct dpll *reduced_clock)
8798{ 8907{
8799 struct drm_crtc *crtc = &intel_crtc->base; 8908 struct drm_crtc *crtc = &intel_crtc->base;
8800 struct drm_device *dev = crtc->dev; 8909 struct drm_device *dev = crtc->dev;
@@ -8902,10 +9011,10 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8902{ 9011{
8903 struct drm_device *dev = crtc->base.dev; 9012 struct drm_device *dev = crtc->base.dev;
8904 struct drm_i915_private *dev_priv = dev->dev_private; 9013 struct drm_i915_private *dev_priv = dev->dev_private;
8905 intel_clock_t reduced_clock; 9014 struct dpll reduced_clock;
8906 bool has_reduced_clock = false; 9015 bool has_reduced_clock = false;
8907 struct intel_shared_dpll *pll; 9016 struct intel_shared_dpll *pll;
8908 const intel_limit_t *limit; 9017 const struct intel_limit *limit;
8909 int refclk = 120000; 9018 int refclk = 120000;
8910 9019
8911 memset(&crtc_state->dpll_hw_state, 0, 9020 memset(&crtc_state->dpll_hw_state, 0,
@@ -9300,6 +9409,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9300 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9409 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9301 9410
9302 if (HAS_PCH_IBX(dev_priv)) { 9411 if (HAS_PCH_IBX(dev_priv)) {
9412 /*
9413 * The pipe->pch transcoder and pch transcoder->pll
9414 * mapping is fixed.
9415 */
9303 pll_id = (enum intel_dpll_id) crtc->pipe; 9416 pll_id = (enum intel_dpll_id) crtc->pipe;
9304 } else { 9417 } else {
9305 tmp = I915_READ(PCH_DPLL_SEL); 9418 tmp = I915_READ(PCH_DPLL_SEL);
@@ -9687,6 +9800,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9687 cdclk, dev_priv->cdclk_freq); 9800 cdclk, dev_priv->cdclk_freq);
9688} 9801}
9689 9802
9803static int broadwell_calc_cdclk(int max_pixclk)
9804{
9805 if (max_pixclk > 540000)
9806 return 675000;
9807 else if (max_pixclk > 450000)
9808 return 540000;
9809 else if (max_pixclk > 337500)
9810 return 450000;
9811 else
9812 return 337500;
9813}
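
[Reviewer note] A quick check that the new helper reproduces the previously hardcoded values (sketch; the ladder is copied from broadwell_calc_cdclk() above):

#include <assert.h>

static int broadwell_calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 540000)
		return 675000;
	else if (max_pixclk > 450000)
		return 540000;
	else if (max_pixclk > 337500)
		return 450000;
	else
		return 337500;
}

int main(void)
{
	/* The !active_crtcs path below now calls broadwell_calc_cdclk(0),
	 * which matches the old hardcoded 337500 kHz. */
	assert(broadwell_calc_cdclk(0) == 337500);
	return 0;
}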
9814
9690static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) 9815static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9691{ 9816{
9692 struct drm_i915_private *dev_priv = to_i915(state->dev); 9817 struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -9698,14 +9823,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9698 * FIXME should also account for plane ratio 9823 * FIXME should also account for plane ratio
9699 * once 64bpp pixel formats are supported. 9824 * once 64bpp pixel formats are supported.
9700 */ 9825 */
9701 if (max_pixclk > 540000) 9826 cdclk = broadwell_calc_cdclk(max_pixclk);
9702 cdclk = 675000;
9703 else if (max_pixclk > 450000)
9704 cdclk = 540000;
9705 else if (max_pixclk > 337500)
9706 cdclk = 450000;
9707 else
9708 cdclk = 337500;
9709 9827
9710 if (cdclk > dev_priv->max_cdclk_freq) { 9828 if (cdclk > dev_priv->max_cdclk_freq) {
9711 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", 9829 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
@@ -9715,7 +9833,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9715 9833
9716 intel_state->cdclk = intel_state->dev_cdclk = cdclk; 9834 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9717 if (!intel_state->active_crtcs) 9835 if (!intel_state->active_crtcs)
9718 intel_state->dev_cdclk = 337500; 9836 intel_state->dev_cdclk = broadwell_calc_cdclk(0);
9719 9837
9720 return 0; 9838 return 0;
9721} 9839}
@@ -9730,6 +9848,47 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9730 broadwell_set_cdclk(dev, req_cdclk); 9848 broadwell_set_cdclk(dev, req_cdclk);
9731} 9849}
9732 9850
9851static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
9852{
9853 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9854 struct drm_i915_private *dev_priv = to_i915(state->dev);
9855 const int max_pixclk = ilk_max_pixel_rate(state);
9856 int vco = intel_state->cdclk_pll_vco;
9857 int cdclk;
9858
9859 /*
9860 * FIXME should also account for plane ratio
9861 * once 64bpp pixel formats are supported.
9862 */
9863 cdclk = skl_calc_cdclk(max_pixclk, vco);
9864
9865 /*
 9866	 * FIXME move the cdclk calculation to
 9867	 * compute_config() so we can fail gracefully.
9868 */
9869 if (cdclk > dev_priv->max_cdclk_freq) {
9870 DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9871 cdclk, dev_priv->max_cdclk_freq);
9872 cdclk = dev_priv->max_cdclk_freq;
9873 }
9874
9875 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9876 if (!intel_state->active_crtcs)
9877 intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
9878
9879 return 0;
9880}
9881
9882static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9883{
9884 struct drm_i915_private *dev_priv = to_i915(old_state->dev);
9885 struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
9886 unsigned int req_cdclk = intel_state->dev_cdclk;
9887 unsigned int req_vco = intel_state->cdclk_pll_vco;
9888
9889 skl_set_cdclk(dev_priv, req_cdclk, req_vco);
9890}
9891
9733static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 9892static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9734 struct intel_crtc_state *crtc_state) 9893 struct intel_crtc_state *crtc_state)
9735{ 9894{
@@ -9850,6 +10009,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9850 enum intel_display_power_domain power_domain; 10009 enum intel_display_power_domain power_domain;
9851 u32 tmp; 10010 u32 tmp;
9852 10011
10012 /*
10013 * The pipe->transcoder mapping is fixed with the exception of the eDP
10014 * transcoder handled below.
10015 */
9853 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10016 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9854 10017
9855 /* 10018 /*
@@ -10317,10 +10480,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
10317 struct drm_i915_gem_object *obj; 10480 struct drm_i915_gem_object *obj;
10318 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 10481 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10319 10482
10320 obj = i915_gem_alloc_object(dev, 10483 obj = i915_gem_object_create(dev,
10321 intel_framebuffer_size_for_mode(mode, bpp)); 10484 intel_framebuffer_size_for_mode(mode, bpp));
10322 if (obj == NULL) 10485 if (IS_ERR(obj))
10323 return ERR_PTR(-ENOMEM); 10486 return ERR_CAST(obj);
10324 10487
10325 mode_cmd.width = mode->hdisplay; 10488 mode_cmd.width = mode->hdisplay;
10326 mode_cmd.height = mode->vdisplay; 10489 mode_cmd.height = mode->vdisplay;
@@ -10632,7 +10795,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10632 int pipe = pipe_config->cpu_transcoder; 10795 int pipe = pipe_config->cpu_transcoder;
10633 u32 dpll = pipe_config->dpll_hw_state.dpll; 10796 u32 dpll = pipe_config->dpll_hw_state.dpll;
10634 u32 fp; 10797 u32 fp;
10635 intel_clock_t clock; 10798 struct dpll clock;
10636 int port_clock; 10799 int port_clock;
10637 int refclk = i9xx_pll_refclk(dev, pipe_config); 10800 int refclk = i9xx_pll_refclk(dev, pipe_config);
10638 10801
@@ -10806,31 +10969,27 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10806 return mode; 10969 return mode;
10807} 10970}
10808 10971
10809void intel_mark_busy(struct drm_device *dev) 10972void intel_mark_busy(struct drm_i915_private *dev_priv)
10810{ 10973{
10811 struct drm_i915_private *dev_priv = dev->dev_private;
10812
10813 if (dev_priv->mm.busy) 10974 if (dev_priv->mm.busy)
10814 return; 10975 return;
10815 10976
10816 intel_runtime_pm_get(dev_priv); 10977 intel_runtime_pm_get(dev_priv);
10817 i915_update_gfx_val(dev_priv); 10978 i915_update_gfx_val(dev_priv);
10818 if (INTEL_INFO(dev)->gen >= 6) 10979 if (INTEL_GEN(dev_priv) >= 6)
10819 gen6_rps_busy(dev_priv); 10980 gen6_rps_busy(dev_priv);
10820 dev_priv->mm.busy = true; 10981 dev_priv->mm.busy = true;
10821} 10982}
10822 10983
10823void intel_mark_idle(struct drm_device *dev) 10984void intel_mark_idle(struct drm_i915_private *dev_priv)
10824{ 10985{
10825 struct drm_i915_private *dev_priv = dev->dev_private;
10826
10827 if (!dev_priv->mm.busy) 10986 if (!dev_priv->mm.busy)
10828 return; 10987 return;
10829 10988
10830 dev_priv->mm.busy = false; 10989 dev_priv->mm.busy = false;
10831 10990
10832 if (INTEL_INFO(dev)->gen >= 6) 10991 if (INTEL_GEN(dev_priv) >= 6)
10833 gen6_rps_idle(dev->dev_private); 10992 gen6_rps_idle(dev_priv);
10834 10993
10835 intel_runtime_pm_put(dev_priv); 10994 intel_runtime_pm_put(dev_priv);
10836} 10995}
@@ -10839,15 +10998,16 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
10839{ 10998{
10840 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10999 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10841 struct drm_device *dev = crtc->dev; 11000 struct drm_device *dev = crtc->dev;
10842 struct intel_unpin_work *work; 11001 struct intel_flip_work *work;
10843 11002
10844 spin_lock_irq(&dev->event_lock); 11003 spin_lock_irq(&dev->event_lock);
10845 work = intel_crtc->unpin_work; 11004 work = intel_crtc->flip_work;
10846 intel_crtc->unpin_work = NULL; 11005 intel_crtc->flip_work = NULL;
10847 spin_unlock_irq(&dev->event_lock); 11006 spin_unlock_irq(&dev->event_lock);
10848 11007
10849 if (work) { 11008 if (work) {
10850 cancel_work_sync(&work->work); 11009 cancel_work_sync(&work->mmio_work);
11010 cancel_work_sync(&work->unpin_work);
10851 kfree(work); 11011 kfree(work);
10852 } 11012 }
10853 11013
@@ -10858,12 +11018,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
10858 11018
10859static void intel_unpin_work_fn(struct work_struct *__work) 11019static void intel_unpin_work_fn(struct work_struct *__work)
10860{ 11020{
10861 struct intel_unpin_work *work = 11021 struct intel_flip_work *work =
10862 container_of(__work, struct intel_unpin_work, work); 11022 container_of(__work, struct intel_flip_work, unpin_work);
10863 struct intel_crtc *crtc = to_intel_crtc(work->crtc); 11023 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10864 struct drm_device *dev = crtc->base.dev; 11024 struct drm_device *dev = crtc->base.dev;
10865 struct drm_plane *primary = crtc->base.primary; 11025 struct drm_plane *primary = crtc->base.primary;
10866 11026
11027 if (is_mmio_work(work))
11028 flush_work(&work->mmio_work);
11029
10867 mutex_lock(&dev->struct_mutex); 11030 mutex_lock(&dev->struct_mutex);
10868 intel_unpin_fb_obj(work->old_fb, primary->state->rotation); 11031 intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
10869 drm_gem_object_unreference(&work->pending_flip_obj->base); 11032 drm_gem_object_unreference(&work->pending_flip_obj->base);
@@ -10882,60 +11045,14 @@ static void intel_unpin_work_fn(struct work_struct *__work)
10882 kfree(work); 11045 kfree(work);
10883} 11046}
10884 11047
10885static void do_intel_finish_page_flip(struct drm_device *dev,
10886 struct drm_crtc *crtc)
10887{
10888 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10889 struct intel_unpin_work *work;
10890 unsigned long flags;
10891
10892 /* Ignore early vblank irqs */
10893 if (intel_crtc == NULL)
10894 return;
10895
10896 /*
10897 * This is called both by irq handlers and the reset code (to complete
10898 * lost pageflips) so needs the full irqsave spinlocks.
10899 */
10900 spin_lock_irqsave(&dev->event_lock, flags);
10901 work = intel_crtc->unpin_work;
10902
10903 /* Ensure we don't miss a work->pending update ... */
10904 smp_rmb();
10905
10906 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10907 spin_unlock_irqrestore(&dev->event_lock, flags);
10908 return;
10909 }
10910
10911 page_flip_completed(intel_crtc);
10912
10913 spin_unlock_irqrestore(&dev->event_lock, flags);
10914}
10915
10916void intel_finish_page_flip(struct drm_device *dev, int pipe)
10917{
10918 struct drm_i915_private *dev_priv = dev->dev_private;
10919 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10920
10921 do_intel_finish_page_flip(dev, crtc);
10922}
10923
10924void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10925{
10926 struct drm_i915_private *dev_priv = dev->dev_private;
10927 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10928
10929 do_intel_finish_page_flip(dev, crtc);
10930}
10931
10932/* Is 'a' after or equal to 'b'? */ 11048/* Is 'a' after or equal to 'b'? */
10933static bool g4x_flip_count_after_eq(u32 a, u32 b) 11049static bool g4x_flip_count_after_eq(u32 a, u32 b)
10934{ 11050{
10935 return !((a - b) & 0x80000000); 11051 return !((a - b) & 0x80000000);
10936} 11052}
10937 11053
10938static bool page_flip_finished(struct intel_crtc *crtc) 11054static bool __pageflip_finished_cs(struct intel_crtc *crtc,
11055 struct intel_flip_work *work)
10939{ 11056{
10940 struct drm_device *dev = crtc->base.dev; 11057 struct drm_device *dev = crtc->base.dev;
10941 struct drm_i915_private *dev_priv = dev->dev_private; 11058 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10977,40 +11094,103 @@ static bool page_flip_finished(struct intel_crtc *crtc)
10977 * anyway, we don't really care. 11094 * anyway, we don't really care.
10978 */ 11095 */
10979 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 11096 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
10980 crtc->unpin_work->gtt_offset && 11097 crtc->flip_work->gtt_offset &&
10981 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), 11098 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
10982 crtc->unpin_work->flip_count); 11099 crtc->flip_work->flip_count);
10983} 11100}
10984 11101
10985void intel_prepare_page_flip(struct drm_device *dev, int plane) 11102static bool
11103__pageflip_finished_mmio(struct intel_crtc *crtc,
11104 struct intel_flip_work *work)
10986{ 11105{
10987 struct drm_i915_private *dev_priv = dev->dev_private; 11106 /*
10988 struct intel_crtc *intel_crtc = 11107 * MMIO work completes when vblank is different from
10989 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 11108 * flip_queued_vblank.
11109 *
11110 * Reset counter value doesn't matter, this is handled by
11111 * i915_wait_request finishing early, so no need to handle
11112 * reset here.
11113 */
11114 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
11115}
11116
11117
11118static bool pageflip_finished(struct intel_crtc *crtc,
11119 struct intel_flip_work *work)
11120{
11121 if (!atomic_read(&work->pending))
11122 return false;
11123
11124 smp_rmb();
11125
11126 if (is_mmio_work(work))
11127 return __pageflip_finished_mmio(crtc, work);
11128 else
11129 return __pageflip_finished_cs(crtc, work);
11130}
11131
11132void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11133{
11134 struct drm_device *dev = dev_priv->dev;
11135 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11136 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11137 struct intel_flip_work *work;
11138 unsigned long flags;
11139
11140 /* Ignore early vblank irqs */
11141 if (!crtc)
11142 return;
11143
11144 /*
11145 * This is called both by irq handlers and the reset code (to complete
11146 * lost pageflips) so needs the full irqsave spinlocks.
11147 */
11148 spin_lock_irqsave(&dev->event_lock, flags);
11149 work = intel_crtc->flip_work;
11150
11151 if (work != NULL &&
11152 !is_mmio_work(work) &&
11153 pageflip_finished(intel_crtc, work))
11154 page_flip_completed(intel_crtc);
11155
11156 spin_unlock_irqrestore(&dev->event_lock, flags);
11157}
11158
11159void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
11160{
11161 struct drm_device *dev = dev_priv->dev;
11162 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11163 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11164 struct intel_flip_work *work;
10990 unsigned long flags; 11165 unsigned long flags;
10991 11166
11167 /* Ignore early vblank irqs */
11168 if (!crtc)
11169 return;
10992 11170
10993 /* 11171 /*
10994 * This is called both by irq handlers and the reset code (to complete 11172 * This is called both by irq handlers and the reset code (to complete
10995 * lost pageflips) so needs the full irqsave spinlocks. 11173 * lost pageflips) so needs the full irqsave spinlocks.
10996 *
10997 * NB: An MMIO update of the plane base pointer will also
10998 * generate a page-flip completion irq, i.e. every modeset
10999 * is also accompanied by a spurious intel_prepare_page_flip().
11000 */ 11174 */
11001 spin_lock_irqsave(&dev->event_lock, flags); 11175 spin_lock_irqsave(&dev->event_lock, flags);
11002 if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) 11176 work = intel_crtc->flip_work;
11003 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 11177
11178 if (work != NULL &&
11179 is_mmio_work(work) &&
11180 pageflip_finished(intel_crtc, work))
11181 page_flip_completed(intel_crtc);
11182
11004 spin_unlock_irqrestore(&dev->event_lock, flags); 11183 spin_unlock_irqrestore(&dev->event_lock, flags);
11005} 11184}
11006 11185
11007static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) 11186static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
11187 struct intel_flip_work *work)
11008{ 11188{
11189 work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
11190
11009 /* Ensure that the work item is consistent when activating it ... */ 11191 /* Ensure that the work item is consistent when activating it ... */
11010 smp_wmb(); 11192 smp_mb__before_atomic();
11011 atomic_set(&work->pending, INTEL_FLIP_PENDING); 11193 atomic_set(&work->pending, 1);
11012 /* and that it is marked active as soon as the irq could fire. */
11013 smp_wmb();
11014} 11194}
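
[Reviewer note] The barrier here pairs with the smp_rmb() in pageflip_finished() above; sketched side by side (assumed intent):

/*
 *	producer				consumer
 *	(intel_mark_page_flip_active)		(pageflip_finished)
 *
 *	work->flip_queued_vblank = ...;		if (!atomic_read(&work->pending))
 *	smp_mb__before_atomic();			return false;
 *	atomic_set(&work->pending, 1);		smp_rmb();
 *						... work->flip_queued_vblank ...
 *
 * i.e. once the consumer observes pending != 0, the vblank count
 * written before the barrier is guaranteed to be visible.
 */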
11015 11195
11016static int intel_gen2_queue_flip(struct drm_device *dev, 11196static int intel_gen2_queue_flip(struct drm_device *dev,
@@ -11041,10 +11221,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
11041 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11221 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11042 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11222 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11043 intel_ring_emit(engine, fb->pitches[0]); 11223 intel_ring_emit(engine, fb->pitches[0]);
11044 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11224 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11045 intel_ring_emit(engine, 0); /* aux display base address, unused */ 11225 intel_ring_emit(engine, 0); /* aux display base address, unused */
11046 11226
11047 intel_mark_page_flip_active(intel_crtc->unpin_work);
11048 return 0; 11227 return 0;
11049} 11228}
11050 11229
@@ -11073,10 +11252,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
11073 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | 11252 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
11074 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11253 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11075 intel_ring_emit(engine, fb->pitches[0]); 11254 intel_ring_emit(engine, fb->pitches[0]);
11076 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11255 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11077 intel_ring_emit(engine, MI_NOOP); 11256 intel_ring_emit(engine, MI_NOOP);
11078 11257
11079 intel_mark_page_flip_active(intel_crtc->unpin_work);
11080 return 0; 11258 return 0;
11081} 11259}
11082 11260
@@ -11104,7 +11282,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11104 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11282 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11105 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11283 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11106 intel_ring_emit(engine, fb->pitches[0]); 11284 intel_ring_emit(engine, fb->pitches[0]);
11107 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset | 11285 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
11108 obj->tiling_mode); 11286 obj->tiling_mode);
11109 11287
11110 /* XXX Enabling the panel-fitter across page-flip is so far 11288 /* XXX Enabling the panel-fitter across page-flip is so far
@@ -11115,7 +11293,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11115 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11293 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11116 intel_ring_emit(engine, pf | pipesrc); 11294 intel_ring_emit(engine, pf | pipesrc);
11117 11295
11118 intel_mark_page_flip_active(intel_crtc->unpin_work);
11119 return 0; 11296 return 0;
11120} 11297}
11121 11298
@@ -11139,7 +11316,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11139 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11316 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11140 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11317 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11141 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); 11318 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
11142 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11319 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11143 11320
11144 /* Contrary to the suggestions in the documentation, 11321 /* Contrary to the suggestions in the documentation,
11145 * "Enable Panel Fitter" does not seem to be required when page 11322 * "Enable Panel Fitter" does not seem to be required when page
@@ -11151,7 +11328,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11151 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11328 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11152 intel_ring_emit(engine, pf | pipesrc); 11329 intel_ring_emit(engine, pf | pipesrc);
11153 11330
11154 intel_mark_page_flip_active(intel_crtc->unpin_work);
11155 return 0; 11331 return 0;
11156} 11332}
11157 11333
@@ -11243,10 +11419,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11243 11419
11244 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); 11420 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
11245 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); 11421 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
11246 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11422 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11247 intel_ring_emit(engine, (MI_NOOP)); 11423 intel_ring_emit(engine, (MI_NOOP));
11248 11424
11249 intel_mark_page_flip_active(intel_crtc->unpin_work);
11250 return 0; 11425 return 0;
11251} 11426}
11252 11427
@@ -11264,7 +11439,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
11264 if (engine == NULL) 11439 if (engine == NULL)
11265 return true; 11440 return true;
11266 11441
11267 if (INTEL_INFO(engine->dev)->gen < 5) 11442 if (INTEL_GEN(engine->i915) < 5)
11268 return false; 11443 return false;
11269 11444
11270 if (i915.use_mmio_flip < 0) 11445 if (i915.use_mmio_flip < 0)
@@ -11283,7 +11458,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
11283 11458
11284static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, 11459static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11285 unsigned int rotation, 11460 unsigned int rotation,
11286 struct intel_unpin_work *work) 11461 struct intel_flip_work *work)
11287{ 11462{
11288 struct drm_device *dev = intel_crtc->base.dev; 11463 struct drm_device *dev = intel_crtc->base.dev;
11289 struct drm_i915_private *dev_priv = dev->dev_private; 11464 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11335,7 +11510,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11335} 11510}
11336 11511
11337static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, 11512static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11338 struct intel_unpin_work *work) 11513 struct intel_flip_work *work)
11339{ 11514{
11340 struct drm_device *dev = intel_crtc->base.dev; 11515 struct drm_device *dev = intel_crtc->base.dev;
11341 struct drm_i915_private *dev_priv = dev->dev_private; 11516 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11358,48 +11533,20 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11358 POSTING_READ(DSPSURF(intel_crtc->plane)); 11533 POSTING_READ(DSPSURF(intel_crtc->plane));
11359} 11534}
11360 11535
11361/* 11536static void intel_mmio_flip_work_func(struct work_struct *w)
11362 * XXX: This is the temporary way to update the plane registers until we get
11363 * around to using the usual plane update functions for MMIO flips
11364 */
11365static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11366{
11367 struct intel_crtc *crtc = mmio_flip->crtc;
11368 struct intel_unpin_work *work;
11369
11370 spin_lock_irq(&crtc->base.dev->event_lock);
11371 work = crtc->unpin_work;
11372 spin_unlock_irq(&crtc->base.dev->event_lock);
11373 if (work == NULL)
11374 return;
11375
11376 intel_mark_page_flip_active(work);
11377
11378 intel_pipe_update_start(crtc);
11379
11380 if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11381 skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11382 else
11383	/* use_mmio_flip() restricts MMIO flips to ilk+ */
11384 ilk_do_mmio_flip(crtc, work);
11385
11386 intel_pipe_update_end(crtc);
11387}
11388
11389static void intel_mmio_flip_work_func(struct work_struct *work)
11390{ 11537{
11391 struct intel_mmio_flip *mmio_flip = 11538 struct intel_flip_work *work =
11392 container_of(work, struct intel_mmio_flip, work); 11539 container_of(w, struct intel_flip_work, mmio_work);
11540 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
11541 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11393 struct intel_framebuffer *intel_fb = 11542 struct intel_framebuffer *intel_fb =
11394 to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); 11543 to_intel_framebuffer(crtc->base.primary->fb);
11395 struct drm_i915_gem_object *obj = intel_fb->obj; 11544 struct drm_i915_gem_object *obj = intel_fb->obj;
11396 11545
11397 if (mmio_flip->req) { 11546 if (work->flip_queued_req)
11398 WARN_ON(__i915_wait_request(mmio_flip->req, 11547 WARN_ON(__i915_wait_request(work->flip_queued_req,
11399 false, NULL, 11548 false, NULL,
11400 &mmio_flip->i915->rps.mmioflips)); 11549 &dev_priv->rps.mmioflips));
11401 i915_gem_request_unreference__unlocked(mmio_flip->req);
11402 }
11403 11550
11404 /* For framebuffer backed by dmabuf, wait for fence */ 11551 /* For framebuffer backed by dmabuf, wait for fence */
11405 if (obj->base.dma_buf) 11552 if (obj->base.dma_buf)
@@ -11407,29 +11554,15 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
11407 false, false, 11554 false, false,
11408 MAX_SCHEDULE_TIMEOUT) < 0); 11555 MAX_SCHEDULE_TIMEOUT) < 0);
11409 11556
11410 intel_do_mmio_flip(mmio_flip); 11557 intel_pipe_update_start(crtc);
11411 kfree(mmio_flip);
11412}
11413
11414static int intel_queue_mmio_flip(struct drm_device *dev,
11415 struct drm_crtc *crtc,
11416 struct drm_i915_gem_object *obj)
11417{
11418 struct intel_mmio_flip *mmio_flip;
11419
11420 mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11421 if (mmio_flip == NULL)
11422 return -ENOMEM;
11423
11424 mmio_flip->i915 = to_i915(dev);
11425 mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11426 mmio_flip->crtc = to_intel_crtc(crtc);
11427 mmio_flip->rotation = crtc->primary->state->rotation;
11428 11558
11429 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); 11559 if (INTEL_GEN(dev_priv) >= 9)
11430 schedule_work(&mmio_flip->work); 11560 skl_do_mmio_flip(crtc, work->rotation, work);
11561 else
 11562	/* use_mmio_flip() restricts MMIO flips to ilk+ */
11563 ilk_do_mmio_flip(crtc, work);
11431 11564
11432 return 0; 11565 intel_pipe_update_end(crtc, work);
11433} 11566}
11434 11567
11435static int intel_default_queue_flip(struct drm_device *dev, 11568static int intel_default_queue_flip(struct drm_device *dev,
@@ -11442,37 +11575,32 @@ static int intel_default_queue_flip(struct drm_device *dev,
11442 return -ENODEV; 11575 return -ENODEV;
11443} 11576}
11444 11577
11445static bool __intel_pageflip_stall_check(struct drm_device *dev, 11578static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11446 struct drm_crtc *crtc) 11579 struct intel_crtc *intel_crtc,
11580 struct intel_flip_work *work)
11447{ 11581{
11448 struct drm_i915_private *dev_priv = dev->dev_private; 11582 u32 addr, vblank;
11449 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11450 struct intel_unpin_work *work = intel_crtc->unpin_work;
11451 u32 addr;
11452
11453 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11454 return true;
11455 11583
11456 if (atomic_read(&work->pending) < INTEL_FLIP_PENDING) 11584 if (!atomic_read(&work->pending))
11457 return false; 11585 return false;
11458 11586
11459 if (!work->enable_stall_check) 11587 smp_rmb();
11460 return false;
11461 11588
11589 vblank = intel_crtc_get_vblank_counter(intel_crtc);
11462 if (work->flip_ready_vblank == 0) { 11590 if (work->flip_ready_vblank == 0) {
11463 if (work->flip_queued_req && 11591 if (work->flip_queued_req &&
11464 !i915_gem_request_completed(work->flip_queued_req, true)) 11592 !i915_gem_request_completed(work->flip_queued_req, true))
11465 return false; 11593 return false;
11466 11594
11467 work->flip_ready_vblank = drm_crtc_vblank_count(crtc); 11595 work->flip_ready_vblank = vblank;
11468 } 11596 }
11469 11597
11470 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) 11598 if (vblank - work->flip_ready_vblank < 3)
11471 return false; 11599 return false;
11472 11600
11473 /* Potential stall - if we see that the flip has happened, 11601 /* Potential stall - if we see that the flip has happened,
11474 * assume a missed interrupt. */ 11602 * assume a missed interrupt. */
11475 if (INTEL_INFO(dev)->gen >= 4) 11603 if (INTEL_GEN(dev_priv) >= 4)
11476 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 11604 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11477 else 11605 else
11478 addr = I915_READ(DSPADDR(intel_crtc->plane)); 11606 addr = I915_READ(DSPADDR(intel_crtc->plane));
@@ -11484,12 +11612,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
11484 return addr == work->gtt_offset; 11612 return addr == work->gtt_offset;
11485} 11613}
11486 11614
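The stall test "vblank - work->flip_ready_vblank < 3" depends on unsigned 32-bit arithmetic, so it keeps working when the hardware frame counter wraps. A small self-contained sketch of that assumption:

	#include <stdint.h>
	#include <stdio.h>

	/* True once at least 3 vblanks have elapsed since the flip was ready,
	 * even if the 32-bit counter wrapped in between. */
	static int flip_stalled(uint32_t vblank_now, uint32_t flip_ready_vblank)
	{
		return (uint32_t)(vblank_now - flip_ready_vblank) >= 3;
	}

	int main(void)
	{
		/* Counter wrapped from 0xfffffffe to 1: 3 vblanks elapsed. */
		printf("%d\n", flip_stalled(1u, 0xfffffffeu)); /* 1 */
		printf("%d\n", flip_stalled(100u, 99u));       /* 0 */
		return 0;
	}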
11487void intel_check_page_flip(struct drm_device *dev, int pipe) 11615void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11488{ 11616{
11489 struct drm_i915_private *dev_priv = dev->dev_private; 11617 struct drm_device *dev = dev_priv->dev;
11490 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11618 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11491 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11619 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11492 struct intel_unpin_work *work; 11620 struct intel_flip_work *work;
11493 11621
11494 WARN_ON(!in_interrupt()); 11622 WARN_ON(!in_interrupt());
11495 11623
@@ -11497,16 +11625,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
11497 return; 11625 return;
11498 11626
11499 spin_lock(&dev->event_lock); 11627 spin_lock(&dev->event_lock);
11500 work = intel_crtc->unpin_work; 11628 work = intel_crtc->flip_work;
11501 if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) { 11629
11502 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 11630 if (work != NULL && !is_mmio_work(work) &&
11503 work->flip_queued_vblank, drm_vblank_count(dev, pipe)); 11631 __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
11632 WARN_ONCE(1,
11633 "Kicking stuck page flip: queued at %d, now %d\n",
11634 work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
11504 page_flip_completed(intel_crtc); 11635 page_flip_completed(intel_crtc);
11505 work = NULL; 11636 work = NULL;
11506 } 11637 }
11507 if (work != NULL && 11638
11508 drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1) 11639 if (work != NULL && !is_mmio_work(work) &&
11509 intel_queue_rps_boost_for_request(dev, work->flip_queued_req); 11640 intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
11641 intel_queue_rps_boost_for_request(work->flip_queued_req);
11510 spin_unlock(&dev->event_lock); 11642 spin_unlock(&dev->event_lock);
11511} 11643}
11512 11644
@@ -11522,7 +11654,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11522 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11654 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11523 struct drm_plane *primary = crtc->primary; 11655 struct drm_plane *primary = crtc->primary;
11524 enum pipe pipe = intel_crtc->pipe; 11656 enum pipe pipe = intel_crtc->pipe;
11525 struct intel_unpin_work *work; 11657 struct intel_flip_work *work;
11526 struct intel_engine_cs *engine; 11658 struct intel_engine_cs *engine;
11527 bool mmio_flip; 11659 bool mmio_flip;
11528 struct drm_i915_gem_request *request = NULL; 11660 struct drm_i915_gem_request *request = NULL;
@@ -11559,19 +11691,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11559 work->event = event; 11691 work->event = event;
11560 work->crtc = crtc; 11692 work->crtc = crtc;
11561 work->old_fb = old_fb; 11693 work->old_fb = old_fb;
11562 INIT_WORK(&work->work, intel_unpin_work_fn); 11694 INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
11563 11695
11564 ret = drm_crtc_vblank_get(crtc); 11696 ret = drm_crtc_vblank_get(crtc);
11565 if (ret) 11697 if (ret)
11566 goto free_work; 11698 goto free_work;
11567 11699
11568 /* We borrow the event spin lock for protecting unpin_work */ 11700 /* We borrow the event spin lock for protecting flip_work */
11569 spin_lock_irq(&dev->event_lock); 11701 spin_lock_irq(&dev->event_lock);
11570 if (intel_crtc->unpin_work) { 11702 if (intel_crtc->flip_work) {
11571 /* Before declaring the flip queue wedged, check if 11703 /* Before declaring the flip queue wedged, check if
11572 * the hardware completed the operation behind our backs. 11704 * the hardware completed the operation behind our backs.
11573 */ 11705 */
11574 if (__intel_pageflip_stall_check(dev, crtc)) { 11706 if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
11575 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); 11707 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11576 page_flip_completed(intel_crtc); 11708 page_flip_completed(intel_crtc);
11577 } else { 11709 } else {
@@ -11583,7 +11715,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11583 return -EBUSY; 11715 return -EBUSY;
11584 } 11716 }
11585 } 11717 }
11586 intel_crtc->unpin_work = work; 11718 intel_crtc->flip_work = work;
11587 spin_unlock_irq(&dev->event_lock); 11719 spin_unlock_irq(&dev->event_lock);
11588 11720
11589 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 11721 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11638,6 +11770,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11638 */ 11770 */
11639 if (!mmio_flip) { 11771 if (!mmio_flip) {
11640 ret = i915_gem_object_sync(obj, engine, &request); 11772 ret = i915_gem_object_sync(obj, engine, &request);
11773 if (!ret && !request) {
11774 request = i915_gem_request_alloc(engine, NULL);
11775 ret = PTR_ERR_OR_ZERO(request);
11776 }
11777
11641 if (ret) 11778 if (ret)
11642 goto cleanup_pending; 11779 goto cleanup_pending;
11643 } 11780 }
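The new allocation fallback leans on the kernel's ERR_PTR convention: a returned pointer either refers to a real object or encodes a small negative errno, and PTR_ERR_OR_ZERO() collapses that into an int in one step. A simplified, self-contained re-implementation of the idiom (not the kernel's headers):

	#include <stdio.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long IS_ERR(const void *p)
	{ return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }
	static inline long PTR_ERR_OR_ZERO(const void *p)
	{ return IS_ERR(p) ? (long)p : 0; }

	static void *request_alloc(int fail)
	{
		static int dummy;
		return fail ? ERR_PTR(-12 /* ENOMEM */) : &dummy;
	}

	int main(void)
	{
		void *req = request_alloc(1);
		long ret = PTR_ERR_OR_ZERO(req); /* -12 on failure, 0 on success */
		printf("ret = %ld\n", ret);
		return 0;
	}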
@@ -11649,38 +11786,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11649 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), 11786 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11650 obj, 0); 11787 obj, 0);
11651 work->gtt_offset += intel_crtc->dspaddr_offset; 11788 work->gtt_offset += intel_crtc->dspaddr_offset;
11789 work->rotation = crtc->primary->state->rotation;
11652 11790
11653 if (mmio_flip) { 11791 if (mmio_flip) {
11654 ret = intel_queue_mmio_flip(dev, crtc, obj); 11792 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
11655 if (ret)
11656 goto cleanup_unpin;
11657 11793
11658 i915_gem_request_assign(&work->flip_queued_req, 11794 i915_gem_request_assign(&work->flip_queued_req,
11659 obj->last_write_req); 11795 obj->last_write_req);
11660 } else {
11661 if (!request) {
11662 request = i915_gem_request_alloc(engine, NULL);
11663 if (IS_ERR(request)) {
11664 ret = PTR_ERR(request);
11665 goto cleanup_unpin;
11666 }
11667 }
11668 11796
11797 schedule_work(&work->mmio_work);
11798 } else {
11799 i915_gem_request_assign(&work->flip_queued_req, request);
11669 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, 11800 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11670 page_flip_flags); 11801 page_flip_flags);
11671 if (ret) 11802 if (ret)
11672 goto cleanup_unpin; 11803 goto cleanup_unpin;
11673 11804
11674 i915_gem_request_assign(&work->flip_queued_req, request); 11805 intel_mark_page_flip_active(intel_crtc, work);
11675 }
11676 11806
11677 if (request)
11678 i915_add_request_no_flush(request); 11807 i915_add_request_no_flush(request);
11808 }
11679 11809
11680 work->flip_queued_vblank = drm_crtc_vblank_count(crtc); 11810 i915_gem_track_fb(intel_fb_obj(old_fb), obj,
11681 work->enable_stall_check = true;
11682
11683 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11684 to_intel_plane(primary)->frontbuffer_bit); 11811 to_intel_plane(primary)->frontbuffer_bit);
11685 mutex_unlock(&dev->struct_mutex); 11812 mutex_unlock(&dev->struct_mutex);
11686 11813
@@ -11706,7 +11833,7 @@ cleanup:
11706 drm_framebuffer_unreference(work->old_fb); 11833 drm_framebuffer_unreference(work->old_fb);
11707 11834
11708 spin_lock_irq(&dev->event_lock); 11835 spin_lock_irq(&dev->event_lock);
11709 intel_crtc->unpin_work = NULL; 11836 intel_crtc->flip_work = NULL;
11710 spin_unlock_irq(&dev->event_lock); 11837 spin_unlock_irq(&dev->event_lock);
11711 11838
11712 drm_crtc_vblank_put(crtc); 11839 drm_crtc_vblank_put(crtc);
@@ -11808,12 +11935,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11808 struct drm_i915_private *dev_priv = to_i915(dev); 11935 struct drm_i915_private *dev_priv = to_i915(dev);
11809 struct intel_plane_state *old_plane_state = 11936 struct intel_plane_state *old_plane_state =
11810 to_intel_plane_state(plane->state); 11937 to_intel_plane_state(plane->state);
11811 int idx = intel_crtc->base.base.id, ret;
11812 bool mode_changed = needs_modeset(crtc_state); 11938 bool mode_changed = needs_modeset(crtc_state);
11813 bool was_crtc_enabled = crtc->state->active; 11939 bool was_crtc_enabled = crtc->state->active;
11814 bool is_crtc_enabled = crtc_state->active; 11940 bool is_crtc_enabled = crtc_state->active;
11815 bool turn_off, turn_on, visible, was_visible; 11941 bool turn_off, turn_on, visible, was_visible;
11816 struct drm_framebuffer *fb = plane_state->fb; 11942 struct drm_framebuffer *fb = plane_state->fb;
11943 int ret;
11817 11944
11818 if (crtc_state && INTEL_INFO(dev)->gen >= 9 && 11945 if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11819 plane->type != DRM_PLANE_TYPE_CURSOR) { 11946 plane->type != DRM_PLANE_TYPE_CURSOR) {
@@ -11834,6 +11961,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11834 * Visibility is calculated as if the crtc was on, but 11961 * Visibility is calculated as if the crtc was on, but
11835 * after scaler setup everything depends on it being off 11962 * after scaler setup everything depends on it being off
11836 * when the crtc isn't active. 11963 * when the crtc isn't active.
11964 *
11965 * FIXME this is wrong for watermarks. Watermarks should also
11966 * be computed as if the pipe would be active. Perhaps move
11967 * per-plane wm computation to the .check_plane() hook, and
11968 * only combine the results from all planes in the current place?
11837 */ 11969 */
11838 if (!is_crtc_enabled) 11970 if (!is_crtc_enabled)
11839 to_intel_plane_state(plane_state)->visible = visible = false; 11971 to_intel_plane_state(plane_state)->visible = visible = false;
@@ -11847,11 +11979,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11847 turn_off = was_visible && (!visible || mode_changed); 11979 turn_off = was_visible && (!visible || mode_changed);
11848 turn_on = visible && (!was_visible || mode_changed); 11980 turn_on = visible && (!was_visible || mode_changed);
11849 11981
11850 DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx, 11982 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11851 plane->base.id, fb ? fb->base.id : -1); 11983 intel_crtc->base.base.id,
11984 intel_crtc->base.name,
11985 plane->base.id, plane->name,
11986 fb ? fb->base.id : -1);
11852 11987
11853 DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n", 11988 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11854 plane->base.id, was_visible, visible, 11989 plane->base.id, plane->name,
11990 was_visible, visible,
11855 turn_off, turn_on, mode_changed); 11991 turn_off, turn_on, mode_changed);
11856 11992
11857 if (turn_on) { 11993 if (turn_on) {
@@ -12007,7 +12143,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12007 } 12143 }
12008 } else if (dev_priv->display.compute_intermediate_wm) { 12144 } else if (dev_priv->display.compute_intermediate_wm) {
12009 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) 12145 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
12010 pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; 12146 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
12011 } 12147 }
12012 12148
12013 if (INTEL_INFO(dev)->gen >= 9) { 12149 if (INTEL_INFO(dev)->gen >= 9) {
@@ -12142,7 +12278,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12142 struct intel_plane_state *state; 12278 struct intel_plane_state *state;
12143 struct drm_framebuffer *fb; 12279 struct drm_framebuffer *fb;
12144 12280
12145 DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, 12281 DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
12282 crtc->base.base.id, crtc->base.name,
12146 context, pipe_config, pipe_name(crtc->pipe)); 12283 context, pipe_config, pipe_name(crtc->pipe));
12147 12284
12148 DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); 12285 DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
@@ -12243,29 +12380,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12243 state = to_intel_plane_state(plane->state); 12380 state = to_intel_plane_state(plane->state);
12244 fb = state->base.fb; 12381 fb = state->base.fb;
12245 if (!fb) { 12382 if (!fb) {
12246 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d " 12383 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
12247 "disabled, scaler_id = %d\n", 12384 plane->base.id, plane->name, state->scaler_id);
12248 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12249 plane->base.id, intel_plane->pipe,
12250 (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12251 drm_plane_index(plane), state->scaler_id);
12252 continue; 12385 continue;
12253 } 12386 }
12254 12387
12255 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled", 12388 DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
12256 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", 12389 plane->base.id, plane->name);
12257 plane->base.id, intel_plane->pipe, 12390 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
12258 crtc->base.primary == plane ? 0 : intel_plane->plane + 1, 12391 fb->base.id, fb->width, fb->height,
12259 drm_plane_index(plane)); 12392 drm_get_format_name(fb->pixel_format));
12260 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x", 12393 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
12261 fb->base.id, fb->width, fb->height, fb->pixel_format); 12394 state->scaler_id,
12262 DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n", 12395 state->src.x1 >> 16, state->src.y1 >> 16,
12263 state->scaler_id, 12396 drm_rect_width(&state->src) >> 16,
12264 state->src.x1 >> 16, state->src.y1 >> 16, 12397 drm_rect_height(&state->src) >> 16,
12265 drm_rect_width(&state->src) >> 16, 12398 state->dst.x1, state->dst.y1,
12266 drm_rect_height(&state->src) >> 16, 12399 drm_rect_width(&state->dst),
12267 state->dst.x1, state->dst.y1, 12400 drm_rect_height(&state->dst));
12268 drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12269 } 12401 }
12270} 12402}
12271 12403
@@ -12932,7 +13064,7 @@ verify_crtc_state(struct drm_crtc *crtc,
12932 pipe_config->base.crtc = crtc; 13064 pipe_config->base.crtc = crtc;
12933 pipe_config->base.state = old_state; 13065 pipe_config->base.state = old_state;
12934 13066
12935 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 13067 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12936 13068
12937 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); 13069 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12938 13070
@@ -13280,6 +13412,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13280 intel_state->active_crtcs |= 1 << i; 13412 intel_state->active_crtcs |= 1 << i;
13281 else 13413 else
13282 intel_state->active_crtcs &= ~(1 << i); 13414 intel_state->active_crtcs &= ~(1 << i);
13415
13416 if (crtc_state->active != crtc->state->active)
13417 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
13283 } 13418 }
13284 13419
13285 /* 13420 /*
@@ -13290,9 +13425,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13290 * adjusted_mode bits in the crtc directly. 13425 * adjusted_mode bits in the crtc directly.
13291 */ 13426 */
13292 if (dev_priv->display.modeset_calc_cdclk) { 13427 if (dev_priv->display.modeset_calc_cdclk) {
13428 if (!intel_state->cdclk_pll_vco)
13429 intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
13430 if (!intel_state->cdclk_pll_vco)
13431 intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
13432
13293 ret = dev_priv->display.modeset_calc_cdclk(state); 13433 ret = dev_priv->display.modeset_calc_cdclk(state);
13434 if (ret < 0)
13435 return ret;
13294 13436
13295 if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq) 13437 if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
13438 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
13296 ret = intel_modeset_all_pipes(state); 13439 ret = intel_modeset_all_pipes(state);
13297 13440
13298 if (ret < 0) 13441 if (ret < 0)
@@ -13316,38 +13459,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13316 * phase. The code here should be run after the per-crtc and per-plane 'check' 13459 * phase. The code here should be run after the per-crtc and per-plane 'check'
13317 * handlers to ensure that all derived state has been updated. 13460 * handlers to ensure that all derived state has been updated.
13318 */ 13461 */
13319static void calc_watermark_data(struct drm_atomic_state *state) 13462static int calc_watermark_data(struct drm_atomic_state *state)
13320{ 13463{
13321 struct drm_device *dev = state->dev; 13464 struct drm_device *dev = state->dev;
13322 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13465 struct drm_i915_private *dev_priv = to_i915(dev);
13323 struct drm_crtc *crtc;
13324 struct drm_crtc_state *cstate;
13325 struct drm_plane *plane;
13326 struct drm_plane_state *pstate;
13327
13328 /*
13329 * Calculate watermark configuration details now that derived
13330 * plane/crtc state is all properly updated.
13331 */
13332 drm_for_each_crtc(crtc, dev) {
13333 cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13334 crtc->state;
13335
13336 if (cstate->active)
13337 intel_state->wm_config.num_pipes_active++;
13338 }
13339 drm_for_each_legacy_plane(plane, dev) {
13340 pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13341 plane->state;
13342 13466
13343 if (!to_intel_plane_state(pstate)->visible) 13467 /* Is there platform-specific watermark information to calculate? */
13344 continue; 13468 if (dev_priv->display.compute_global_watermarks)
13469 return dev_priv->display.compute_global_watermarks(state);
13345 13470
13346 intel_state->wm_config.sprites_enabled = true; 13471 return 0;
13347 if (pstate->crtc_w != pstate->src_w >> 16 ||
13348 pstate->crtc_h != pstate->src_h >> 16)
13349 intel_state->wm_config.sprites_scaled = true;
13350 }
13351} 13472}
13352 13473
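calc_watermark_data() now reduces to dispatching an optional per-platform hook, with a NULL pointer meaning the platform needs no global watermark pass. The same pattern in isolation, with hypothetical names:

	#include <stdio.h>

	struct display_funcs {
		/* NULL when the platform has no global watermark pass. */
		int (*compute_global_watermarks)(void *state);
	};

	static int calc_watermarks(struct display_funcs *funcs, void *state)
	{
		if (funcs->compute_global_watermarks)
			return funcs->compute_global_watermarks(state);
		return 0; /* nothing to do on this platform */
	}

	static int skl_watermarks(void *state) { (void)state; puts("skl pass"); return 0; }

	int main(void)
	{
		struct display_funcs skl  = { .compute_global_watermarks = skl_watermarks };
		struct display_funcs gen3 = { 0 };

		calc_watermarks(&skl, 0);  /* runs the platform pass */
		calc_watermarks(&gen3, 0); /* no-op, returns 0 */
		return 0;
	}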
13353/** 13474/**
@@ -13377,14 +13498,13 @@ static int intel_atomic_check(struct drm_device *dev,
13377 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) 13498 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13378 crtc_state->mode_changed = true; 13499 crtc_state->mode_changed = true;
13379 13500
13380 if (!crtc_state->enable) { 13501 if (!needs_modeset(crtc_state))
13381 if (needs_modeset(crtc_state))
13382 any_ms = true;
13383 continue; 13502 continue;
13384 }
13385 13503
13386 if (!needs_modeset(crtc_state)) 13504 if (!crtc_state->enable) {
13505 any_ms = true;
13387 continue; 13506 continue;
13507 }
13388 13508
13389 /* FIXME: For only active_changed we shouldn't need to do any 13509 /* FIXME: For only active_changed we shouldn't need to do any
13390 * state recomputation at all. */ 13510 * state recomputation at all. */
@@ -13394,8 +13514,11 @@ static int intel_atomic_check(struct drm_device *dev,
13394 return ret; 13514 return ret;
13395 13515
13396 ret = intel_modeset_pipe_config(crtc, pipe_config); 13516 ret = intel_modeset_pipe_config(crtc, pipe_config);
13397 if (ret) 13517 if (ret) {
13518 intel_dump_pipe_config(to_intel_crtc(crtc),
13519 pipe_config, "[failed]");
13398 return ret; 13520 return ret;
13521 }
13399 13522
13400 if (i915.fastboot && 13523 if (i915.fastboot &&
13401 intel_pipe_config_compare(dev, 13524 intel_pipe_config_compare(dev,
@@ -13405,13 +13528,12 @@ static int intel_atomic_check(struct drm_device *dev,
13405 to_intel_crtc_state(crtc_state)->update_pipe = true; 13528 to_intel_crtc_state(crtc_state)->update_pipe = true;
13406 } 13529 }
13407 13530
13408 if (needs_modeset(crtc_state)) { 13531 if (needs_modeset(crtc_state))
13409 any_ms = true; 13532 any_ms = true;
13410 13533
13411 ret = drm_atomic_add_affected_planes(state, crtc); 13534 ret = drm_atomic_add_affected_planes(state, crtc);
13412 if (ret) 13535 if (ret)
13413 return ret; 13536 return ret;
13414 }
13415 13537
13416 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 13538 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13417 needs_modeset(crtc_state) ? 13539 needs_modeset(crtc_state) ?
@@ -13431,9 +13553,7 @@ static int intel_atomic_check(struct drm_device *dev,
13431 return ret; 13553 return ret;
13432 13554
13433 intel_fbc_choose_crtc(dev_priv, state); 13555 intel_fbc_choose_crtc(dev_priv, state);
13434 calc_watermark_data(state); 13556 return calc_watermark_data(state);
13435
13436 return 0;
13437} 13557}
13438 13558
13439static int intel_atomic_prepare_commit(struct drm_device *dev, 13559static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -13495,6 +13615,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
13495 return ret; 13615 return ret;
13496} 13616}
13497 13617
13618u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13619{
13620 struct drm_device *dev = crtc->base.dev;
13621
13622 if (!dev->max_vblank_count)
13623 return drm_accurate_vblank_count(&crtc->base);
13624
13625 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13626}
13627
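The helper above picks between the hardware frame counter and the software-interpolated count, using dev->max_vblank_count == 0 as the usual DRM signal that no usable hardware counter exists. A rough sketch of that selection, under that assumption and with made-up types:

	#include <stdint.h>
	#include <stdio.h>

	struct fake_dev {
		uint32_t max_vblank_count;        /* 0 means: no hw frame counter */
		uint32_t (*hw_counter)(int pipe);
	};

	static uint32_t sw_count(int pipe) { (void)pipe; return 42; }   /* timestamp based */
	static uint32_t hw_count(int pipe) { (void)pipe; return 1000; }

	static uint32_t get_vblank_counter(const struct fake_dev *dev, int pipe)
	{
		if (!dev->max_vblank_count)
			return sw_count(pipe);
		return dev->hw_counter(pipe);
	}

	int main(void)
	{
		struct fake_dev gen2 = { 0, 0 };               /* no hw counter */
		struct fake_dev gen4 = { 0xffffff, hw_count }; /* 24-bit counter */

		printf("%u %u\n", get_vblank_counter(&gen2, 0),
		       get_vblank_counter(&gen4, 0));
		return 0;
	}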
13498static void intel_atomic_wait_for_vblanks(struct drm_device *dev, 13628static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13499 struct drm_i915_private *dev_priv, 13629 struct drm_i915_private *dev_priv,
13500 unsigned crtc_mask) 13630 unsigned crtc_mask)
@@ -13596,8 +13726,9 @@ static int intel_atomic_commit(struct drm_device *dev,
13596 return ret; 13726 return ret;
13597 } 13727 }
13598 13728
13599 drm_atomic_helper_swap_state(dev, state); 13729 drm_atomic_helper_swap_state(state, true);
13600 dev_priv->wm.config = intel_state->wm_config; 13730 dev_priv->wm.distrust_bios_wm = false;
13731 dev_priv->wm.skl_results = intel_state->wm_results;
13601 intel_shared_dpll_commit(state); 13732 intel_shared_dpll_commit(state);
13602 13733
13603 if (intel_state->modeset) { 13734 if (intel_state->modeset) {
@@ -13653,7 +13784,8 @@ static int intel_atomic_commit(struct drm_device *dev,
13653 drm_atomic_helper_update_legacy_modeset_state(state->dev, state); 13784 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13654 13785
13655 if (dev_priv->display.modeset_commit_cdclk && 13786 if (dev_priv->display.modeset_commit_cdclk &&
13656 intel_state->dev_cdclk != dev_priv->cdclk_freq) 13787 (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
13788 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
13657 dev_priv->display.modeset_commit_cdclk(state); 13789 dev_priv->display.modeset_commit_cdclk(state);
13658 13790
13659 intel_modeset_verify_disabled(dev); 13791 intel_modeset_verify_disabled(dev);
@@ -13749,8 +13881,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
13749 13881
13750 state = drm_atomic_state_alloc(dev); 13882 state = drm_atomic_state_alloc(dev);
13751 if (!state) { 13883 if (!state) {
13752 DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory", 13884 DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
13753 crtc->base.id); 13885 crtc->base.id, crtc->name);
13754 return; 13886 return;
13755 } 13887 }
13756 13888
@@ -14006,7 +14138,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14006{ 14138{
14007 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 14139 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14008 14140
14009 intel_pipe_update_end(intel_crtc); 14141 intel_pipe_update_end(intel_crtc, NULL);
14010} 14142}
14011 14143
14012/** 14144/**
@@ -14018,9 +14150,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14018 */ 14150 */
14019void intel_plane_destroy(struct drm_plane *plane) 14151void intel_plane_destroy(struct drm_plane *plane)
14020{ 14152{
14021 struct intel_plane *intel_plane = to_intel_plane(plane); 14153 if (!plane)
14154 return;
14155
14022 drm_plane_cleanup(plane); 14156 drm_plane_cleanup(plane);
14023 kfree(intel_plane); 14157 kfree(to_intel_plane(plane));
14024} 14158}
14025 14159
14026const struct drm_plane_funcs intel_plane_funcs = { 14160const struct drm_plane_funcs intel_plane_funcs = {
@@ -14092,10 +14226,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14092 primary->disable_plane = i9xx_disable_primary_plane; 14226 primary->disable_plane = i9xx_disable_primary_plane;
14093 } 14227 }
14094 14228
14095 ret = drm_universal_plane_init(dev, &primary->base, 0, 14229 if (INTEL_INFO(dev)->gen >= 9)
14096 &intel_plane_funcs, 14230 ret = drm_universal_plane_init(dev, &primary->base, 0,
14097 intel_primary_formats, num_formats, 14231 &intel_plane_funcs,
14098 DRM_PLANE_TYPE_PRIMARY, NULL); 14232 intel_primary_formats, num_formats,
14233 DRM_PLANE_TYPE_PRIMARY,
14234 "plane 1%c", pipe_name(pipe));
14235 else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
14236 ret = drm_universal_plane_init(dev, &primary->base, 0,
14237 &intel_plane_funcs,
14238 intel_primary_formats, num_formats,
14239 DRM_PLANE_TYPE_PRIMARY,
14240 "primary %c", pipe_name(pipe));
14241 else
14242 ret = drm_universal_plane_init(dev, &primary->base, 0,
14243 &intel_plane_funcs,
14244 intel_primary_formats, num_formats,
14245 DRM_PLANE_TYPE_PRIMARY,
14246 "plane %c", plane_name(primary->plane));
14099 if (ret) 14247 if (ret)
14100 goto fail; 14248 goto fail;
14101 14249
@@ -14253,7 +14401,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14253 &intel_plane_funcs, 14401 &intel_plane_funcs,
14254 intel_cursor_formats, 14402 intel_cursor_formats,
14255 ARRAY_SIZE(intel_cursor_formats), 14403 ARRAY_SIZE(intel_cursor_formats),
14256 DRM_PLANE_TYPE_CURSOR, NULL); 14404 DRM_PLANE_TYPE_CURSOR,
14405 "cursor %c", pipe_name(pipe));
14257 if (ret) 14406 if (ret)
14258 goto fail; 14407 goto fail;
14259 14408
@@ -14338,7 +14487,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
14338 goto fail; 14487 goto fail;
14339 14488
14340 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, 14489 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14341 cursor, &intel_crtc_funcs, NULL); 14490 cursor, &intel_crtc_funcs,
14491 "pipe %c", pipe_name(pipe));
14342 if (ret) 14492 if (ret)
14343 goto fail; 14493 goto fail;
14344 14494
@@ -14372,10 +14522,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
14372 return; 14522 return;
14373 14523
14374fail: 14524fail:
14375 if (primary) 14525 intel_plane_destroy(primary);
14376 drm_plane_cleanup(primary); 14526 intel_plane_destroy(cursor);
14377 if (cursor)
14378 drm_plane_cleanup(cursor);
14379 kfree(crtc_state); 14527 kfree(crtc_state);
14380 kfree(intel_crtc); 14528 kfree(intel_crtc);
14381} 14529}
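Letting intel_plane_destroy() accept NULL (like kfree()) is what allows the fail: path above to drop its per-pointer checks. The idiom in isolation, with hypothetical types:

	#include <stdlib.h>

	struct plane { int id; };

	/* NULL-tolerant destructor: callers can unwind unconditionally. */
	static void plane_destroy(struct plane *p)
	{
		if (!p)
			return;
		free(p);
	}

	int main(void)
	{
		struct plane *primary = malloc(sizeof(*primary));
		struct plane *cursor = NULL; /* e.g. cursor creation failed */

		/* error path: no if (primary) / if (cursor) checks needed */
		plane_destroy(primary);
		plane_destroy(cursor);
		return 0;
	}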
@@ -14554,6 +14702,8 @@ static void intel_setup_outputs(struct drm_device *dev)
14554 if (I915_READ(PCH_DP_D) & DP_DETECTED) 14702 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14555 intel_dp_init(dev, PCH_DP_D, PORT_D); 14703 intel_dp_init(dev, PCH_DP_D, PORT_D);
14556 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 14704 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14705 bool has_edp;
14706
14557 /* 14707 /*
14558 * The DP_DETECTED bit is the latched state of the DDC 14708 * The DP_DETECTED bit is the latched state of the DDC
14559 * SDA pin at boot. However since eDP doesn't require DDC 14709 * SDA pin at boot. However since eDP doesn't require DDC
@@ -14563,19 +14713,17 @@ static void intel_setup_outputs(struct drm_device *dev)
14563 * eDP ports. Consult the VBT as well as DP_DETECTED to 14713 * eDP ports. Consult the VBT as well as DP_DETECTED to
14564 * detect eDP ports. 14714 * detect eDP ports.
14565 */ 14715 */
14566 if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && 14716 has_edp = intel_dp_is_edp(dev, PORT_B);
14567 !intel_dp_is_edp(dev, PORT_B)) 14717 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_edp)
14718 has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14719 if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && !has_edp)
14568 intel_hdmi_init(dev, VLV_HDMIB, PORT_B); 14720 intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14569 if (I915_READ(VLV_DP_B) & DP_DETECTED ||
14570 intel_dp_is_edp(dev, PORT_B))
14571 intel_dp_init(dev, VLV_DP_B, PORT_B);
14572 14721
14573 if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && 14722 has_edp = intel_dp_is_edp(dev, PORT_C);
14574 !intel_dp_is_edp(dev, PORT_C)) 14723 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_edp)
14724 has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14725 if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && !has_edp)
14575 intel_hdmi_init(dev, VLV_HDMIC, PORT_C); 14726 intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14576 if (I915_READ(VLV_DP_C) & DP_DETECTED ||
14577 intel_dp_is_edp(dev, PORT_C))
14578 intel_dp_init(dev, VLV_DP_C, PORT_C);
14579 14727
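The rewritten port B/C setup gates HDMI on the DP result: has_edp starts from the VBT, is ANDed with the (now boolean) return of intel_dp_init(), and HDMI on the shared pins is registered only when no eDP panel actually bound. A sketch of that decision flow with stand-in helpers:

	#include <stdbool.h>
	#include <stdio.h>

	static bool dp_init(bool success) { return success; }

	static void setup_port(bool vbt_says_edp, bool dp_detected,
			       bool hdmi_detected, bool dp_init_ok)
	{
		bool has_edp = vbt_says_edp;

		if (dp_detected || has_edp)
			has_edp &= dp_init(dp_init_ok); /* eDP only if init bound */
		if (hdmi_detected && !has_edp)
			puts("register HDMI");          /* fall back to HDMI */
		else
			puts("no HDMI on this port");
	}

	int main(void)
	{
		setup_port(true, true, true, false); /* eDP init failed -> HDMI */
		setup_port(true, true, true, true);  /* eDP bound -> no HDMI */
		return 0;
	}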
14580 if (IS_CHERRYVIEW(dev)) { 14728 if (IS_CHERRYVIEW(dev)) {
14581 /* eDP not supported on port D, so don't check VBT */ 14729 /* eDP not supported on port D, so don't check VBT */
@@ -15050,12 +15198,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15050 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 15198 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15051 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 15199 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15052 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 15200 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15053 if (IS_BROADWELL(dev_priv)) { 15201 }
15054 dev_priv->display.modeset_commit_cdclk = 15202
15055 broadwell_modeset_commit_cdclk; 15203 if (IS_BROADWELL(dev_priv)) {
15056 dev_priv->display.modeset_calc_cdclk = 15204 dev_priv->display.modeset_commit_cdclk =
15057 broadwell_modeset_calc_cdclk; 15205 broadwell_modeset_commit_cdclk;
15058 } 15206 dev_priv->display.modeset_calc_cdclk =
15207 broadwell_modeset_calc_cdclk;
15059 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15208 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15060 dev_priv->display.modeset_commit_cdclk = 15209 dev_priv->display.modeset_commit_cdclk =
15061 valleyview_modeset_commit_cdclk; 15210 valleyview_modeset_commit_cdclk;
@@ -15066,6 +15215,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15066 broxton_modeset_commit_cdclk; 15215 broxton_modeset_commit_cdclk;
15067 dev_priv->display.modeset_calc_cdclk = 15216 dev_priv->display.modeset_calc_cdclk =
15068 broxton_modeset_calc_cdclk; 15217 broxton_modeset_calc_cdclk;
15218 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
15219 dev_priv->display.modeset_commit_cdclk =
15220 skl_modeset_commit_cdclk;
15221 dev_priv->display.modeset_calc_cdclk =
15222 skl_modeset_calc_cdclk;
15069 } 15223 }
15070 15224
15071 switch (INTEL_INFO(dev_priv)->gen) { 15225 switch (INTEL_INFO(dev_priv)->gen) {
@@ -15293,7 +15447,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
15293 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; 15447 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
15294 15448
15295 intel_init_clock_gating(dev); 15449 intel_init_clock_gating(dev);
15296 intel_enable_gt_powersave(dev); 15450 intel_enable_gt_powersave(dev_priv);
15297} 15451}
15298 15452
15299/* 15453/*
@@ -15363,7 +15517,6 @@ retry:
15363 } 15517 }
15364 15518
15365 /* Write calculated watermark values back */ 15519 /* Write calculated watermark values back */
15366 to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
15367 for_each_crtc_in_state(state, crtc, cstate, i) { 15520 for_each_crtc_in_state(state, crtc, cstate, i) {
15368 struct intel_crtc_state *cs = to_intel_crtc_state(cstate); 15521 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15369 15522
@@ -15461,11 +15614,13 @@ void intel_modeset_init(struct drm_device *dev)
15461 } 15614 }
15462 15615
15463 intel_update_czclk(dev_priv); 15616 intel_update_czclk(dev_priv);
15464 intel_update_rawclk(dev_priv);
15465 intel_update_cdclk(dev); 15617 intel_update_cdclk(dev);
15466 15618
15467 intel_shared_dpll_init(dev); 15619 intel_shared_dpll_init(dev);
15468 15620
15621 if (dev_priv->max_cdclk_freq == 0)
15622 intel_update_max_cdclk(dev);
15623
15469 /* Just disable it once at startup */ 15624 /* Just disable it once at startup */
15470 i915_disable_vga(dev); 15625 i915_disable_vga(dev);
15471 intel_setup_outputs(dev); 15626 intel_setup_outputs(dev);
@@ -15606,8 +15761,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
15606 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { 15761 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15607 bool plane; 15762 bool plane;
15608 15763
15609 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", 15764 DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
15610 crtc->base.base.id); 15765 crtc->base.base.id, crtc->base.name);
15611 15766
15612 /* Pipe has the wrong plane attached and the plane is active. 15767 /* Pipe has the wrong plane attached and the plane is active.
15613 * Temporarily change the plane mapping and disable everything 15768 * Temporarily change the plane mapping and disable everything
@@ -15775,26 +15930,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15775 if (crtc_state->base.active) { 15930 if (crtc_state->base.active) {
15776 dev_priv->active_crtcs |= 1 << crtc->pipe; 15931 dev_priv->active_crtcs |= 1 << crtc->pipe;
15777 15932
15778 if (IS_BROADWELL(dev_priv)) { 15933 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
15779 pixclk = ilk_pipe_pixel_rate(crtc_state); 15934 pixclk = ilk_pipe_pixel_rate(crtc_state);
15780 15935 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15781 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15782 if (crtc_state->ips_enabled)
15783 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15784 } else if (IS_VALLEYVIEW(dev_priv) ||
15785 IS_CHERRYVIEW(dev_priv) ||
15786 IS_BROXTON(dev_priv))
15787 pixclk = crtc_state->base.adjusted_mode.crtc_clock; 15936 pixclk = crtc_state->base.adjusted_mode.crtc_clock;
15788 else 15937 else
15789 WARN_ON(dev_priv->display.modeset_calc_cdclk); 15938 WARN_ON(dev_priv->display.modeset_calc_cdclk);
15939
15940 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15941 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
15942 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15790 } 15943 }
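The IPS correction is plain integer arithmetic: with IPS enabled the pixel rate may consume at most 95% of cdclk, so the minimum cdclk is pixclk scaled by 100/95 and rounded up. A worked example (values in kHz, chosen for illustration):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int pixclk = 450000; /* 450 MHz in kHz */
		unsigned int adjusted = DIV_ROUND_UP(pixclk * 100, 95);

		/* 450000 * 100 / 95 = 473684.2..., rounded up to 473685 kHz */
		printf("min cdclk with IPS: %u kHz\n", adjusted);
		return 0;
	}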
15791 15944
15792 dev_priv->min_pixclk[crtc->pipe] = pixclk; 15945 dev_priv->min_pixclk[crtc->pipe] = pixclk;
15793 15946
15794 readout_plane_state(crtc); 15947 readout_plane_state(crtc);
15795 15948
15796 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 15949 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15797 crtc->base.base.id, 15950 crtc->base.base.id, crtc->base.name,
15798 crtc->active ? "enabled" : "disabled"); 15951 crtc->active ? "enabled" : "disabled");
15799 } 15952 }
15800 15953
@@ -16025,15 +16178,16 @@ retry:
16025 16178
16026void intel_modeset_gem_init(struct drm_device *dev) 16179void intel_modeset_gem_init(struct drm_device *dev)
16027{ 16180{
16181 struct drm_i915_private *dev_priv = to_i915(dev);
16028 struct drm_crtc *c; 16182 struct drm_crtc *c;
16029 struct drm_i915_gem_object *obj; 16183 struct drm_i915_gem_object *obj;
16030 int ret; 16184 int ret;
16031 16185
16032 intel_init_gt_powersave(dev); 16186 intel_init_gt_powersave(dev_priv);
16033 16187
16034 intel_modeset_init_hw(dev); 16188 intel_modeset_init_hw(dev);
16035 16189
16036 intel_setup_overlay(dev); 16190 intel_setup_overlay(dev_priv);
16037 16191
16038 /* 16192 /*
16039 * Make sure any fbs we allocated at startup are properly 16193 * Make sure any fbs we allocated at startup are properly
@@ -16076,7 +16230,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
16076 struct drm_i915_private *dev_priv = dev->dev_private; 16230 struct drm_i915_private *dev_priv = dev->dev_private;
16077 struct intel_connector *connector; 16231 struct intel_connector *connector;
16078 16232
16079 intel_disable_gt_powersave(dev); 16233 intel_disable_gt_powersave(dev_priv);
16080 16234
16081 intel_backlight_unregister(dev); 16235 intel_backlight_unregister(dev);
16082 16236
@@ -16106,21 +16260,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
16106 16260
16107 drm_mode_config_cleanup(dev); 16261 drm_mode_config_cleanup(dev);
16108 16262
16109 intel_cleanup_overlay(dev); 16263 intel_cleanup_overlay(dev_priv);
16110 16264
16111 intel_cleanup_gt_powersave(dev); 16265 intel_cleanup_gt_powersave(dev_priv);
16112 16266
16113 intel_teardown_gmbus(dev); 16267 intel_teardown_gmbus(dev);
16114} 16268}
16115 16269
16116/*
16117 * Return which encoder is currently attached for connector.
16118 */
16119struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
16120{
16121 return &intel_attached_encoder(connector)->base;
16122}
16123
16124void intel_connector_attach_encoder(struct intel_connector *connector, 16270void intel_connector_attach_encoder(struct intel_connector *connector,
16125 struct intel_encoder *encoder) 16271 struct intel_encoder *encoder)
16126{ 16272{
@@ -16204,9 +16350,8 @@ struct intel_display_error_state {
16204}; 16350};
16205 16351
16206struct intel_display_error_state * 16352struct intel_display_error_state *
16207intel_display_capture_error_state(struct drm_device *dev) 16353intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16208{ 16354{
16209 struct drm_i915_private *dev_priv = dev->dev_private;
16210 struct intel_display_error_state *error; 16355 struct intel_display_error_state *error;
16211 int transcoders[] = { 16356 int transcoders[] = {
16212 TRANSCODER_A, 16357 TRANSCODER_A,
@@ -16216,14 +16361,14 @@ intel_display_capture_error_state(struct drm_device *dev)
16216 }; 16361 };
16217 int i; 16362 int i;
16218 16363
16219 if (INTEL_INFO(dev)->num_pipes == 0) 16364 if (INTEL_INFO(dev_priv)->num_pipes == 0)
16220 return NULL; 16365 return NULL;
16221 16366
16222 error = kzalloc(sizeof(*error), GFP_ATOMIC); 16367 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16223 if (error == NULL) 16368 if (error == NULL)
16224 return NULL; 16369 return NULL;
16225 16370
16226 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 16371 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16227 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 16372 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16228 16373
16229 for_each_pipe(dev_priv, i) { 16374 for_each_pipe(dev_priv, i) {
@@ -16239,25 +16384,25 @@ intel_display_capture_error_state(struct drm_device *dev)
16239 16384
16240 error->plane[i].control = I915_READ(DSPCNTR(i)); 16385 error->plane[i].control = I915_READ(DSPCNTR(i));
16241 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 16386 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16242 if (INTEL_INFO(dev)->gen <= 3) { 16387 if (INTEL_GEN(dev_priv) <= 3) {
16243 error->plane[i].size = I915_READ(DSPSIZE(i)); 16388 error->plane[i].size = I915_READ(DSPSIZE(i));
16244 error->plane[i].pos = I915_READ(DSPPOS(i)); 16389 error->plane[i].pos = I915_READ(DSPPOS(i));
16245 } 16390 }
16246 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 16391 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16247 error->plane[i].addr = I915_READ(DSPADDR(i)); 16392 error->plane[i].addr = I915_READ(DSPADDR(i));
16248 if (INTEL_INFO(dev)->gen >= 4) { 16393 if (INTEL_GEN(dev_priv) >= 4) {
16249 error->plane[i].surface = I915_READ(DSPSURF(i)); 16394 error->plane[i].surface = I915_READ(DSPSURF(i));
16250 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 16395 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16251 } 16396 }
16252 16397
16253 error->pipe[i].source = I915_READ(PIPESRC(i)); 16398 error->pipe[i].source = I915_READ(PIPESRC(i));
16254 16399
16255 if (HAS_GMCH_DISPLAY(dev)) 16400 if (HAS_GMCH_DISPLAY(dev_priv))
16256 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 16401 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16257 } 16402 }
16258 16403
16259 /* Note: this does not include DSI transcoders. */ 16404 /* Note: this does not include DSI transcoders. */
16260 error->num_transcoders = INTEL_INFO(dev)->num_pipes; 16405 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16261 if (HAS_DDI(dev_priv)) 16406 if (HAS_DDI(dev_priv))
16262 error->num_transcoders++; /* Account for eDP. */ 16407 error->num_transcoders++; /* Account for eDP. */
16263 16408
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f192f58708c2..be083519dac9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe); 131 enum pipe pipe);
132static void intel_dp_unset_edid(struct intel_dp *intel_dp); 132static void intel_dp_unset_edid(struct intel_dp *intel_dp);
133 133
134static unsigned int intel_dp_unused_lane_mask(int lane_count)
135{
136 return ~((1 << lane_count) - 1) & 0xf;
137}
138
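The helper removed above computed which of the four PHY lanes a given lane count leaves unused. The bit arithmetic on its own:

	#include <stdio.h>

	/* Lanes are bits 0..3; mask out the ones in use, keep the rest. */
	static unsigned int unused_lane_mask(int lane_count)
	{
		return ~((1u << lane_count) - 1) & 0xf;
	}

	int main(void)
	{
		printf("%x\n", unused_lane_mask(1)); /* 0xe: lanes 1-3 unused */
		printf("%x\n", unused_lane_mask(2)); /* 0xc: lanes 2-3 unused */
		printf("%x\n", unused_lane_mask(4)); /* 0x0: all lanes in use */
		return 0;
	}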
139static int 134static int
140intel_dp_max_link_bw(struct intel_dp *intel_dp) 135intel_dp_max_link_bw(struct intel_dp *intel_dp)
141{ 136{
@@ -775,6 +770,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
775 DP_AUX_CH_CTL_TIME_OUT_1600us | 770 DP_AUX_CH_CTL_TIME_OUT_1600us |
776 DP_AUX_CH_CTL_RECEIVE_ERROR | 771 DP_AUX_CH_CTL_RECEIVE_ERROR |
777 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 772 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
773 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
778 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 774 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
779} 775}
780 776
@@ -1582,6 +1578,27 @@ found:
1582 &pipe_config->dp_m2_n2); 1578 &pipe_config->dp_m2_n2);
1583 } 1579 }
1584 1580
1581 /*
1582 * DPLL0 VCO may need to be adjusted to get the correct
1583 * clock for eDP. This will affect cdclk as well.
1584 */
1585 if (is_edp(intel_dp) &&
1586 (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
1587 int vco;
1588
1589 switch (pipe_config->port_clock / 2) {
1590 case 108000:
1591 case 216000:
1592 vco = 8640000;
1593 break;
1594 default:
1595 vco = 8100000;
1596 break;
1597 }
1598
1599 to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
1600 }
1601
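The VCO choice above comes down to divisibility: Skylake's DPLL0 runs at either 8100 MHz or 8640 MHz, and only the 8640 MHz VCO divides down cleanly to the 1.08/2.16 GHz eDP link clocks (port_clock/2 of 108000 or 216000 kHz); everything else keeps the 8100 MHz default. The mapping as a sketch (kHz throughout):

	#include <stdio.h>

	/* Mirrors the switch above. */
	static int edp_dpll0_vco(int port_clock)
	{
		switch (port_clock / 2) {
		case 108000: /* 8640000 / 80 */
		case 216000: /* 8640000 / 40 */
			return 8640000;
		default:
			return 8100000;
		}
	}

	int main(void)
	{
		printf("%d\n", edp_dpll0_vco(432000)); /* 8640000 */
		printf("%d\n", edp_dpll0_vco(540000)); /* 8100000 */
		return 0;
	}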
1585 if (!HAS_DDI(dev)) 1602 if (!HAS_DDI(dev))
1586 intel_dp_set_clock(encoder, pipe_config); 1603 intel_dp_set_clock(encoder, pipe_config);
1587 1604
@@ -2460,50 +2477,6 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
2460 intel_dp_link_down(intel_dp); 2477 intel_dp_link_down(intel_dp);
2461} 2478}
2462 2479
2463static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2464 bool reset)
2465{
2466 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2467 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2468 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2469 enum pipe pipe = crtc->pipe;
2470 uint32_t val;
2471
2472 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2473 if (reset)
2474 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2475 else
2476 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2477 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2478
2479 if (crtc->config->lane_count > 2) {
2480 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2481 if (reset)
2482 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2483 else
2484 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2485 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2486 }
2487
2488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2489 val |= CHV_PCS_REQ_SOFTRESET_EN;
2490 if (reset)
2491 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2492 else
2493 val |= DPIO_PCS_CLK_SOFT_RESET;
2494 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2495
2496 if (crtc->config->lane_count > 2) {
2497 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2498 val |= CHV_PCS_REQ_SOFTRESET_EN;
2499 if (reset)
2500 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2501 else
2502 val |= DPIO_PCS_CLK_SOFT_RESET;
2503 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2504 }
2505}
2506
2507static void chv_post_disable_dp(struct intel_encoder *encoder) 2480static void chv_post_disable_dp(struct intel_encoder *encoder)
2508{ 2481{
2509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2482 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2811,266 +2784,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2811 2784
2812static void vlv_pre_enable_dp(struct intel_encoder *encoder) 2785static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2813{ 2786{
2814 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2787 vlv_phy_pre_encoder_enable(encoder);
2815 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2816 struct drm_device *dev = encoder->base.dev;
2817 struct drm_i915_private *dev_priv = dev->dev_private;
2818 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2819 enum dpio_channel port = vlv_dport_to_channel(dport);
2820 int pipe = intel_crtc->pipe;
2821 u32 val;
2822
2823 mutex_lock(&dev_priv->sb_lock);
2824
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2826 val = 0;
2827 if (pipe)
2828 val |= (1<<21);
2829 else
2830 val &= ~(1<<21);
2831 val |= 0x001000c4;
2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2833 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2834 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2835
2836 mutex_unlock(&dev_priv->sb_lock);
2837 2788
2838 intel_enable_dp(encoder); 2789 intel_enable_dp(encoder);
2839} 2790}
2840 2791
2841static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) 2792static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2842{ 2793{
2843 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2844 struct drm_device *dev = encoder->base.dev;
2845 struct drm_i915_private *dev_priv = dev->dev_private;
2846 struct intel_crtc *intel_crtc =
2847 to_intel_crtc(encoder->base.crtc);
2848 enum dpio_channel port = vlv_dport_to_channel(dport);
2849 int pipe = intel_crtc->pipe;
2850
2851 intel_dp_prepare(encoder); 2794 intel_dp_prepare(encoder);
2852 2795
2853 /* Program Tx lane resets to default */ 2796 vlv_phy_pre_pll_enable(encoder);
2854 mutex_lock(&dev_priv->sb_lock);
2855 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2856 DPIO_PCS_TX_LANE2_RESET |
2857 DPIO_PCS_TX_LANE1_RESET);
2858 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2859 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2860 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2861 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2862 DPIO_PCS_CLK_SOFT_RESET);
2863
2864 /* Fix up inter-pair skew failure */
2865 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2866 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2867 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2868 mutex_unlock(&dev_priv->sb_lock);
2869} 2797}
2870 2798
2871static void chv_pre_enable_dp(struct intel_encoder *encoder) 2799static void chv_pre_enable_dp(struct intel_encoder *encoder)
2872{ 2800{
2873 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2801 chv_phy_pre_encoder_enable(encoder);
2874 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2875 struct drm_device *dev = encoder->base.dev;
2876 struct drm_i915_private *dev_priv = dev->dev_private;
2877 struct intel_crtc *intel_crtc =
2878 to_intel_crtc(encoder->base.crtc);
2879 enum dpio_channel ch = vlv_dport_to_channel(dport);
2880 int pipe = intel_crtc->pipe;
2881 int data, i, stagger;
2882 u32 val;
2883
2884 mutex_lock(&dev_priv->sb_lock);
2885
2886 /* allow hardware to manage TX FIFO reset source */
2887 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2888 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2889 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2890
2891 if (intel_crtc->config->lane_count > 2) {
2892 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2893 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2894 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2895 }
2896
2897	/* Program Tx lane latency optimal setting */
2898 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2899 /* Set the upar bit */
2900 if (intel_crtc->config->lane_count == 1)
2901 data = 0x0;
2902 else
2903 data = (i == 1) ? 0x0 : 0x1;
2904 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2905 data << DPIO_UPAR_SHIFT);
2906 }
2907
2908 /* Data lane stagger programming */
2909 if (intel_crtc->config->port_clock > 270000)
2910 stagger = 0x18;
2911 else if (intel_crtc->config->port_clock > 135000)
2912 stagger = 0xd;
2913 else if (intel_crtc->config->port_clock > 67500)
2914 stagger = 0x7;
2915 else if (intel_crtc->config->port_clock > 33750)
2916 stagger = 0x4;
2917 else
2918 stagger = 0x2;
2919
2920 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2921 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2922 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2923
2924 if (intel_crtc->config->lane_count > 2) {
2925 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2926 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2927 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2928 }
2929
2930 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2931 DPIO_LANESTAGGER_STRAP(stagger) |
2932 DPIO_LANESTAGGER_STRAP_OVRD |
2933 DPIO_TX1_STAGGER_MASK(0x1f) |
2934 DPIO_TX1_STAGGER_MULT(6) |
2935 DPIO_TX2_STAGGER_MULT(0));
2936
2937 if (intel_crtc->config->lane_count > 2) {
2938 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2939 DPIO_LANESTAGGER_STRAP(stagger) |
2940 DPIO_LANESTAGGER_STRAP_OVRD |
2941 DPIO_TX1_STAGGER_MASK(0x1f) |
2942 DPIO_TX1_STAGGER_MULT(7) |
2943 DPIO_TX2_STAGGER_MULT(5));
2944 }
2945
2946 /* Deassert data lane reset */
2947 chv_data_lane_soft_reset(encoder, false);
2948
2949 mutex_unlock(&dev_priv->sb_lock);
2950 2802
2951 intel_enable_dp(encoder); 2803 intel_enable_dp(encoder);
2952 2804
2953 /* Second common lane will stay alive on its own now */ 2805 /* Second common lane will stay alive on its own now */
2954 if (dport->release_cl2_override) { 2806 chv_phy_release_cl2_override(encoder);
2955 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2956 dport->release_cl2_override = false;
2957 }
2958} 2807}
2959 2808
2960static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) 2809static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2961{ 2810{
2962 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2963 struct drm_device *dev = encoder->base.dev;
2964 struct drm_i915_private *dev_priv = dev->dev_private;
2965 struct intel_crtc *intel_crtc =
2966 to_intel_crtc(encoder->base.crtc);
2967 enum dpio_channel ch = vlv_dport_to_channel(dport);
2968 enum pipe pipe = intel_crtc->pipe;
2969 unsigned int lane_mask =
2970 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2971 u32 val;
2972
2973 intel_dp_prepare(encoder); 2811 intel_dp_prepare(encoder);
2974 2812
2975 /* 2813 chv_phy_pre_pll_enable(encoder);
2976 * Must trick the second common lane into life.
2977 * Otherwise we can't even access the PLL.
2978 */
2979 if (ch == DPIO_CH0 && pipe == PIPE_B)
2980 dport->release_cl2_override =
2981 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2982
2983 chv_phy_powergate_lanes(encoder, true, lane_mask);
2984
2985 mutex_lock(&dev_priv->sb_lock);
2986
2987 /* Assert data lane reset */
2988 chv_data_lane_soft_reset(encoder, true);
2989
2990 /* program left/right clock distribution */
2991 if (pipe != PIPE_B) {
2992 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2993 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2994 if (ch == DPIO_CH0)
2995 val |= CHV_BUFLEFTENA1_FORCE;
2996 if (ch == DPIO_CH1)
2997 val |= CHV_BUFRIGHTENA1_FORCE;
2998 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2999 } else {
3000 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3001 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3002 if (ch == DPIO_CH0)
3003 val |= CHV_BUFLEFTENA2_FORCE;
3004 if (ch == DPIO_CH1)
3005 val |= CHV_BUFRIGHTENA2_FORCE;
3006 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3007 }
3008
3009 /* program clock channel usage */
3010 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3011 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3012 if (pipe != PIPE_B)
3013 val &= ~CHV_PCS_USEDCLKCHANNEL;
3014 else
3015 val |= CHV_PCS_USEDCLKCHANNEL;
3016 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3017
3018 if (intel_crtc->config->lane_count > 2) {
3019 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3020 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3021 if (pipe != PIPE_B)
3022 val &= ~CHV_PCS_USEDCLKCHANNEL;
3023 else
3024 val |= CHV_PCS_USEDCLKCHANNEL;
3025 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3026 }
3027
3028 /*
3029	 * This is a bit weird since generally CL
3030 * matches the pipe, but here we need to
3031 * pick the CL based on the port.
3032 */
3033 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3034 if (pipe != PIPE_B)
3035 val &= ~CHV_CMN_USEDCLKCHANNEL;
3036 else
3037 val |= CHV_CMN_USEDCLKCHANNEL;
3038 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3039
3040 mutex_unlock(&dev_priv->sb_lock);
3041} 2814}
3042 2815
3043static void chv_dp_post_pll_disable(struct intel_encoder *encoder) 2816static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3044{ 2817{
3045 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2818 chv_phy_post_pll_disable(encoder);
3046 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3047 u32 val;
3048
3049 mutex_lock(&dev_priv->sb_lock);
3050
3051 /* disable left/right clock distribution */
3052 if (pipe != PIPE_B) {
3053 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3054 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3055 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3056 } else {
3057 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3058 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3059 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3060 }
3061
3062 mutex_unlock(&dev_priv->sb_lock);
3063
3064 /*
3065 * Leave the power down bit cleared for at least one
3066 * lane so that chv_powergate_phy_ch() will power
3067 * on something when the channel is otherwise unused.
3068 * When the port is off and the override is removed
3069 * the lanes power down anyway, so otherwise it doesn't
3070 * really matter what the state of power down bits is
3071 * after this.
3072 */
3073 chv_phy_powergate_lanes(encoder, false, 0x0);
3074} 2819}
3075 2820
3076/* 2821/*
@@ -3178,16 +2923,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3178 2923
3179static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) 2924static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3180{ 2925{
3181 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2926 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3182 struct drm_i915_private *dev_priv = dev->dev_private;
3183 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3184 struct intel_crtc *intel_crtc =
3185 to_intel_crtc(dport->base.base.crtc);
3186 unsigned long demph_reg_value, preemph_reg_value, 2927 unsigned long demph_reg_value, preemph_reg_value,
3187 uniqtranscale_reg_value; 2928 uniqtranscale_reg_value;
3188 uint8_t train_set = intel_dp->train_set[0]; 2929 uint8_t train_set = intel_dp->train_set[0];
3189 enum dpio_channel port = vlv_dport_to_channel(dport);
3190 int pipe = intel_crtc->pipe;
3191 2930
3192 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2931 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3193 case DP_TRAIN_PRE_EMPH_LEVEL_0: 2932 case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3262,37 +3001,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3262 return 0; 3001 return 0;
3263 } 3002 }
3264 3003
3265 mutex_lock(&dev_priv->sb_lock); 3004 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3266 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); 3005 uniqtranscale_reg_value, 0);
3267 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3268 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3269 uniqtranscale_reg_value);
3270 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3271 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3272 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3273 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3274 mutex_unlock(&dev_priv->sb_lock);
3275 3006
3276 return 0; 3007 return 0;
3277} 3008}
3278 3009
3279static bool chv_need_uniq_trans_scale(uint8_t train_set)
3280{
3281 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3282 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3283}
3284
3285static uint32_t chv_signal_levels(struct intel_dp *intel_dp) 3010static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3286{ 3011{
3287 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3012 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3288 struct drm_i915_private *dev_priv = dev->dev_private; 3013 u32 deemph_reg_value, margin_reg_value;
3289 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 3014 bool uniq_trans_scale = false;
3290 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3291 u32 deemph_reg_value, margin_reg_value, val;
3292 uint8_t train_set = intel_dp->train_set[0]; 3015 uint8_t train_set = intel_dp->train_set[0];
3293 enum dpio_channel ch = vlv_dport_to_channel(dport);
3294 enum pipe pipe = intel_crtc->pipe;
3295 int i;
3296 3016
3297 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3017 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3298 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3018 case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3312,7 +3032,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3032 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3313 deemph_reg_value = 128; 3033 deemph_reg_value = 128;
3314 margin_reg_value = 154; 3034 margin_reg_value = 154;
3315 /* FIXME extra to set for 1200 */ 3035 uniq_trans_scale = true;
3316 break; 3036 break;
3317 default: 3037 default:
3318 return 0; 3038 return 0;
@@ -3364,88 +3084,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3364 return 0; 3084 return 0;
3365 } 3085 }
3366 3086
3367 mutex_lock(&dev_priv->sb_lock); 3087 chv_set_phy_signal_level(encoder, deemph_reg_value,
3368 3088 margin_reg_value, uniq_trans_scale);
3369 /* Clear calc init */
3370 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3371 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3372 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3373 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3374 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3375
3376 if (intel_crtc->config->lane_count > 2) {
3377 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3378 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3379 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3380 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3381 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3382 }
3383
3384 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3385 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3386 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3387 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3388
3389 if (intel_crtc->config->lane_count > 2) {
3390 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3391 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3392 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3393 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3394 }
3395
3396 /* Program swing deemph */
3397 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3398 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3399 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3400 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3401 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3402 }
3403
3404 /* Program swing margin */
3405 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3406 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3407
3408 val &= ~DPIO_SWING_MARGIN000_MASK;
3409 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3410
3411 /*
3412 * Supposedly this value shouldn't matter when unique transition
3413 * scale is disabled, but in fact it does matter. Let's just
3414 * always program the same value and hope it's OK.
3415 */
3416 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3417 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3418
3419 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3420 }
3421
3422 /*
3423 * The document said it needs to set bit 27 for ch0 and bit 26
3424 * for ch1. Might be a typo in the doc.
3425 * For now, for this unique transition scale selection, set bit
3426 * 27 for ch0 and ch1.
3427 */
3428 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3429 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3430 if (chv_need_uniq_trans_scale(train_set))
3431 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3432 else
3433 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3434 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3435 }
3436
3437 /* Start swing calculation */
3438 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3439 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3440 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3441
3442 if (intel_crtc->config->lane_count > 2) {
3443 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3444 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3445 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3446 }
3447
3448 mutex_unlock(&dev_priv->sb_lock);
3449 3089
3450 return 0; 3090 return 0;
3451} 3091}
@@ -3714,7 +3354,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3714 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3354 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3715 struct drm_device *dev = dig_port->base.base.dev; 3355 struct drm_device *dev = dig_port->base.base.dev;
3716 struct drm_i915_private *dev_priv = dev->dev_private; 3356 struct drm_i915_private *dev_priv = dev->dev_private;
3717 uint8_t rev;
3718 3357
3719 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 3358 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3720 sizeof(intel_dp->dpcd)) < 0) 3359 sizeof(intel_dp->dpcd)) < 0)
@@ -3771,6 +3410,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3771 DRM_DEBUG_KMS("PSR2 %s on sink", 3410 DRM_DEBUG_KMS("PSR2 %s on sink",
3772 dev_priv->psr.psr2_support ? "supported" : "not supported"); 3411 dev_priv->psr.psr2_support ? "supported" : "not supported");
3773 } 3412 }
3413
3414 /* Read the eDP Display control capabilities registers */
3415 memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
3416 if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3417 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3418 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3419 sizeof(intel_dp->edp_dpcd)))
3420 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3421 intel_dp->edp_dpcd);
3774 } 3422 }
3775 3423
3776 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", 3424 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
@@ -3778,10 +3426,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3778 yesno(drm_dp_tps3_supported(intel_dp->dpcd))); 3426 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3779 3427
3780 /* Intermediate frequency support */ 3428 /* Intermediate frequency support */
3781 if (is_edp(intel_dp) && 3429 if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDP v1.4 or higher */
3782 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3783 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3784 (rev >= 0x03)) { /* eDP v1.4 or higher */
3785 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 3430 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3786 int i; 3431 int i;
3787 3432
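
The hunk above trades a live AUX read of DP_EDP_DPCD_REV for a lookup in the edp_dpcd cache that intel_dp_get_dpcd() now fills. A minimal sketch of the resulting pattern, assuming a hypothetical helper name not present in the diff:

	/* Sketch only: edp_dpcd[0] caches DP_EDP_DPCD_REV once intel_dp_get_dpcd()
	 * has run, so revision checks no longer cost an AUX round trip. */
	static bool intel_edp_is_v14_or_later(struct intel_dp *intel_dp)
	{
		return intel_dp->edp_dpcd[0] >= 0x03; /* 0x03 == eDP v1.4 */
	}
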
@@ -4935,7 +4580,6 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
4935static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 4580static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4936 .get_modes = intel_dp_get_modes, 4581 .get_modes = intel_dp_get_modes,
4937 .mode_valid = intel_dp_mode_valid, 4582 .mode_valid = intel_dp_mode_valid,
4938 .best_encoder = intel_best_encoder,
4939}; 4583};
4940 4584
4941static const struct drm_encoder_funcs intel_dp_enc_funcs = { 4585static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -5590,14 +5234,14 @@ void intel_edp_drrs_flush(struct drm_device *dev,
5590 * 5234 *
5591 * DRRS saves power by switching to low RR based on usage scenarios. 5235 * DRRS saves power by switching to low RR based on usage scenarios.
5592 * 5236 *
5593 * eDP DRRS:- 5237 * The implementation is based on frontbuffer tracking implementation. When
5594 * The implementation is based on frontbuffer tracking implementation. 5238 * there is a disturbance on the screen triggered by user activity or a periodic
5595 * When there is a disturbance on the screen triggered by user activity or a 5239 * system activity, DRRS is disabled (RR is changed to high RR). When there is
5596 * periodic system activity, DRRS is disabled (RR is changed to high RR). 5240 * no movement on screen, after a timeout of 1 second, a switch to low RR is
5597 * When there is no movement on screen, after a timeout of 1 second, a switch 5241 * made.
5598 * to low RR is made. 5242 *
5599 * For integration with frontbuffer tracking code, 5243 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5600 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called. 5244 * and intel_edp_drrs_flush() are called.
5601 * 5245 *
5602 * DRRS can be further extended to support other internal panels and also 5246 * DRRS can be further extended to support other internal panels and also
5603 * the scenario of video playback wherein RR is set based on the rate 5247 * the scenario of video playback wherein RR is set based on the rate
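
A hedged sketch of how the two hooks named in the comment above are driven by frontbuffer tracking; the actual call sites live elsewhere in the driver, and frontbuffer_bits is illustrative:

	/* Activity invalidates (forces high RR); the later flush re-arms the
	 * 1 second idle timeout that switches back down to low RR. */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
	/* ... rendering completes and the frontbuffer is flushed ... */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
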
@@ -5725,8 +5369,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5725 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 5369 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5726 fixed_mode = drm_mode_duplicate(dev, 5370 fixed_mode = drm_mode_duplicate(dev,
5727 dev_priv->vbt.lfp_lvds_vbt_mode); 5371 dev_priv->vbt.lfp_lvds_vbt_mode);
5728 if (fixed_mode) 5372 if (fixed_mode) {
5729 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 5373 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5374 connector->display_info.width_mm = fixed_mode->width_mm;
5375 connector->display_info.height_mm = fixed_mode->height_mm;
5376 }
5730 } 5377 }
5731 mutex_unlock(&dev->mode_config.mutex); 5378 mutex_unlock(&dev->mode_config.mutex);
5732 5379
@@ -5923,9 +5570,9 @@ fail:
5923 return false; 5570 return false;
5924} 5571}
5925 5572
5926void 5573bool intel_dp_init(struct drm_device *dev,
5927intel_dp_init(struct drm_device *dev, 5574 i915_reg_t output_reg,
5928 i915_reg_t output_reg, enum port port) 5575 enum port port)
5929{ 5576{
5930 struct drm_i915_private *dev_priv = dev->dev_private; 5577 struct drm_i915_private *dev_priv = dev->dev_private;
5931 struct intel_digital_port *intel_dig_port; 5578 struct intel_digital_port *intel_dig_port;
@@ -5935,7 +5582,7 @@ intel_dp_init(struct drm_device *dev,
5935 5582
5936 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 5583 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5937 if (!intel_dig_port) 5584 if (!intel_dig_port)
5938 return; 5585 return false;
5939 5586
5940 intel_connector = intel_connector_alloc(); 5587 intel_connector = intel_connector_alloc();
5941 if (!intel_connector) 5588 if (!intel_connector)
@@ -5945,7 +5592,7 @@ intel_dp_init(struct drm_device *dev,
5945 encoder = &intel_encoder->base; 5592 encoder = &intel_encoder->base;
5946 5593
5947 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 5594 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5948 DRM_MODE_ENCODER_TMDS, NULL)) 5595 DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
5949 goto err_encoder_init; 5596 goto err_encoder_init;
5950 5597
5951 intel_encoder->compute_config = intel_dp_compute_config; 5598 intel_encoder->compute_config = intel_dp_compute_config;
@@ -5992,7 +5639,7 @@ intel_dp_init(struct drm_device *dev,
5992 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 5639 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5993 goto err_init_connector; 5640 goto err_init_connector;
5994 5641
5995 return; 5642 return true;
5996 5643
5997err_init_connector: 5644err_init_connector:
5998 drm_encoder_cleanup(encoder); 5645 drm_encoder_cleanup(encoder);
@@ -6000,8 +5647,7 @@ err_encoder_init:
6000 kfree(intel_connector); 5647 kfree(intel_connector);
6001err_connector_alloc: 5648err_connector_alloc:
6002 kfree(intel_dig_port); 5649 kfree(intel_dig_port);
6003 5650 return false;
6004 return;
6005} 5651}
6006 5652
6007void intel_dp_mst_suspend(struct drm_device *dev) 5653void intel_dp_mst_suspend(struct drm_device *dev)
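
With intel_dp_init() now returning bool, a caller can detect a failed port initialization instead of losing the port silently. A sketch of the caller side, with an illustrative register/port pairing:

	/* Sketch: initialization failures become visible to the caller. */
	if (!intel_dp_init(dev, DP_B, PORT_B))
		DRM_DEBUG_KMS("DP on port B failed to initialize\n");
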
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
new file mode 100644
index 000000000000..6532e226db29
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "intel_drv.h"
26
27static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
28{
29 uint8_t reg_val = 0;
30
31 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
32 &reg_val) < 0) {
33 DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
34 DP_EDP_DISPLAY_CONTROL_REGISTER);
35 return;
36 }
37 if (enable)
38 reg_val |= DP_EDP_BACKLIGHT_ENABLE;
39 else
40 reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
41
42 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
43 reg_val) != 1) {
44 DRM_DEBUG_KMS("Failed to %s aux backlight\n",
45 enable ? "enable" : "disable");
46 }
47}
48
49/*
50 * Read the current backlight value from DPCD register(s) based
51 * on whether 8-bit (MSB) or 16-bit (MSB and LSB) values are supported
52 */
53static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
54{
55 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
56 uint8_t read_val[2] = { 0x0 };
57 uint16_t level = 0;
58
59 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
60 &read_val, sizeof(read_val)) < 0) {
61 DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
62 DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
63 return 0;
64 }
65 level = read_val[0];
66 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
67 level = (read_val[0] << 8 | read_val[1]);
68
69 return level;
70}
71
72/*
73 * Sends the current backlight level over the aux channel, checking if it's
74 * using an 8-bit or 16-bit value (MSB and LSB)
75 */
76static void
77intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
78{
79 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
80 uint8_t vals[2] = { 0x0 };
81
82 vals[0] = level;
83
84 /* Write the MSB and/or LSB */
85 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
86 vals[0] = (level & 0xFF00) >> 8;
87 vals[1] = (level & 0xFF);
88 }
89 if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
90 vals, sizeof(vals)) < 0) {
91 DRM_DEBUG_KMS("Failed to write aux backlight level\n");
92 return;
93 }
94}
95
96static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
97{
98 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
99 uint8_t dpcd_buf = 0;
100
101 set_aux_backlight_enable(intel_dp, true);
102
103 if ((drm_dp_dpcd_readb(&intel_dp->aux,
104 DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
105 ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
106 DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
107 drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
108 (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
109}
110
111static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
112{
113 set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
114}
115
116static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
117 enum pipe pipe)
118{
119 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
120 struct intel_panel *panel = &connector->panel;
121
122 intel_dp_aux_enable_backlight(connector);
123
124 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
125 panel->backlight.max = 0xFFFF;
126 else
127 panel->backlight.max = 0xFF;
128
129 panel->backlight.min = 0;
130 panel->backlight.level = intel_dp_aux_get_backlight(connector);
131
132 panel->backlight.enabled = panel->backlight.level != 0;
133
134 return 0;
135}
136
137static bool
138intel_dp_aux_display_control_capable(struct intel_connector *connector)
139{
140 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
141
142 /* Check the eDP Display control capabilities registers to determine if
143 * the panel can support backlight control over the aux channel
144 */
145 if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
146 (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
147 !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
148 (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
149 DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
150 return true;
151 }
152 return false;
153}
154
155int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
156{
157 struct intel_panel *panel = &intel_connector->panel;
158
159 if (!i915.enable_dpcd_backlight)
160 return -ENODEV;
161
162 if (!intel_dp_aux_display_control_capable(intel_connector))
163 return -ENODEV;
164
165 panel->backlight.setup = intel_dp_aux_setup_backlight;
166 panel->backlight.enable = intel_dp_aux_enable_backlight;
167 panel->backlight.disable = intel_dp_aux_disable_backlight;
168 panel->backlight.set = intel_dp_aux_set_backlight;
169 panel->backlight.get = intel_dp_aux_get_backlight;
170
171 return 0;
172}
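
As a worked example of the byte-count handling in intel_dp_aux_set_backlight() and intel_dp_aux_get_backlight() above, with an illustrative level:

	/* A 16-bit level of 0x1234 splits across the two brightness bytes. */
	uint16_t level = 0x1234;
	uint8_t vals[2];
	vals[0] = (level & 0xFF00) >> 8;	/* 0x12 -> DP_EDP_BACKLIGHT_BRIGHTNESS_MSB */
	vals[1] = (level & 0xFF);		/* 0x34 -> the LSB register that follows */
	/* reading back: (vals[0] << 8 | vals[1]) == 0x1234 */
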
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7a34090cef34..f62ca9a126b3 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -534,7 +534,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
534 intel_mst->primary = intel_dig_port; 534 intel_mst->primary = intel_dig_port;
535 535
536 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, 536 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
537 DRM_MODE_ENCODER_DPMST, NULL); 537 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
538 538
539 intel_encoder->type = INTEL_OUTPUT_DP_MST; 539 intel_encoder->type = INTEL_OUTPUT_DP_MST;
540 intel_encoder->crtc_mask = 0x7; 540 intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
new file mode 100644
index 000000000000..288da35572b4
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -0,0 +1,470 @@
1/*
2 * Copyright © 2014-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include "intel_drv.h"
25
26void chv_set_phy_signal_level(struct intel_encoder *encoder,
27 u32 deemph_reg_value, u32 margin_reg_value,
28 bool uniq_trans_scale)
29{
30 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
31 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
32 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
33 enum dpio_channel ch = vlv_dport_to_channel(dport);
34 enum pipe pipe = intel_crtc->pipe;
35 u32 val;
36 int i;
37
38 mutex_lock(&dev_priv->sb_lock);
39
40 /* Clear calc init */
41 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
42 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
43 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
44 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
45 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
46
47 if (intel_crtc->config->lane_count > 2) {
48 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
49 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
50 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
51 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
52 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
53 }
54
55 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
56 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
57 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
58 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
59
60 if (intel_crtc->config->lane_count > 2) {
61 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
62 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
63 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
64 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
65 }
66
67 /* Program swing deemph */
68 for (i = 0; i < intel_crtc->config->lane_count; i++) {
69 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
70 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
71 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
72 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
73 }
74
75 /* Program swing margin */
76 for (i = 0; i < intel_crtc->config->lane_count; i++) {
77 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
78
79 val &= ~DPIO_SWING_MARGIN000_MASK;
80 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
81
82 /*
83 * Supposedly this value shouldn't matter when unique transition
84 * scale is disabled, but in fact it does matter. Let's just
85 * always program the same value and hope it's OK.
86 */
87 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
88 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
89
90 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
91 }
92
93 /*
94 * The document said it needs to set bit 27 for ch0 and bit 26
95 * for ch1. Might be a typo in the doc.
96 * For now, for this unique transition scale selection, set bit
97 * 27 for ch0 and ch1.
98 */
99 for (i = 0; i < intel_crtc->config->lane_count; i++) {
100 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
101 if (uniq_trans_scale)
102 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
103 else
104 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
105 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
106 }
107
108 /* Start swing calculation */
109 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
110 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
111 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
112
113 if (intel_crtc->config->lane_count > 2) {
114 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
115 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
116 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
117 }
118
119 mutex_unlock(&dev_priv->sb_lock);
120
121}
122
123void chv_data_lane_soft_reset(struct intel_encoder *encoder,
124 bool reset)
125{
126 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
127 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
128 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
129 enum pipe pipe = crtc->pipe;
130 uint32_t val;
131
132 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
133 if (reset)
134 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
135 else
136 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
137 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
138
139 if (crtc->config->lane_count > 2) {
140 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
141 if (reset)
142 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
143 else
144 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
145 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
146 }
147
148 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
149 val |= CHV_PCS_REQ_SOFTRESET_EN;
150 if (reset)
151 val &= ~DPIO_PCS_CLK_SOFT_RESET;
152 else
153 val |= DPIO_PCS_CLK_SOFT_RESET;
154 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
155
156 if (crtc->config->lane_count > 2) {
157 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
158 val |= CHV_PCS_REQ_SOFTRESET_EN;
159 if (reset)
160 val &= ~DPIO_PCS_CLK_SOFT_RESET;
161 else
162 val |= DPIO_PCS_CLK_SOFT_RESET;
163 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
164 }
165}
166
167void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
168{
169 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
170 struct drm_device *dev = encoder->base.dev;
171 struct drm_i915_private *dev_priv = dev->dev_private;
172 struct intel_crtc *intel_crtc =
173 to_intel_crtc(encoder->base.crtc);
174 enum dpio_channel ch = vlv_dport_to_channel(dport);
175 enum pipe pipe = intel_crtc->pipe;
176 unsigned int lane_mask =
177 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
178 u32 val;
179
180 /*
181 * Must trick the second common lane into life.
182 * Otherwise we can't even access the PLL.
183 */
184 if (ch == DPIO_CH0 && pipe == PIPE_B)
185 dport->release_cl2_override =
186 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
187
188 chv_phy_powergate_lanes(encoder, true, lane_mask);
189
190 mutex_lock(&dev_priv->sb_lock);
191
192 /* Assert data lane reset */
193 chv_data_lane_soft_reset(encoder, true);
194
195 /* program left/right clock distribution */
196 if (pipe != PIPE_B) {
197 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
198 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
199 if (ch == DPIO_CH0)
200 val |= CHV_BUFLEFTENA1_FORCE;
201 if (ch == DPIO_CH1)
202 val |= CHV_BUFRIGHTENA1_FORCE;
203 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
204 } else {
205 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
206 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
207 if (ch == DPIO_CH0)
208 val |= CHV_BUFLEFTENA2_FORCE;
209 if (ch == DPIO_CH1)
210 val |= CHV_BUFRIGHTENA2_FORCE;
211 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
212 }
213
214 /* program clock channel usage */
215 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
216 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
217 if (pipe != PIPE_B)
218 val &= ~CHV_PCS_USEDCLKCHANNEL;
219 else
220 val |= CHV_PCS_USEDCLKCHANNEL;
221 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
222
223 if (intel_crtc->config->lane_count > 2) {
224 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
225 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
226 if (pipe != PIPE_B)
227 val &= ~CHV_PCS_USEDCLKCHANNEL;
228 else
229 val |= CHV_PCS_USEDCLKCHANNEL;
230 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
231 }
232
233 /*
234 * This is a bit weird since generally CL
235 * matches the pipe, but here we need to
236 * pick the CL based on the port.
237 */
238 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
239 if (pipe != PIPE_B)
240 val &= ~CHV_CMN_USEDCLKCHANNEL;
241 else
242 val |= CHV_CMN_USEDCLKCHANNEL;
243 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
244
245 mutex_unlock(&dev_priv->sb_lock);
246}
247
248void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
249{
250 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
251 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
252 struct drm_device *dev = encoder->base.dev;
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 struct intel_crtc *intel_crtc =
255 to_intel_crtc(encoder->base.crtc);
256 enum dpio_channel ch = vlv_dport_to_channel(dport);
257 int pipe = intel_crtc->pipe;
258 int data, i, stagger;
259 u32 val;
260
261 mutex_lock(&dev_priv->sb_lock);
262
263 /* allow hardware to manage TX FIFO reset source */
264 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
265 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
266 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
267
268 if (intel_crtc->config->lane_count > 2) {
269 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
270 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
271 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
272 }
273
274 /* Program Tx lane latency optimal setting */
275 for (i = 0; i < intel_crtc->config->lane_count; i++) {
276 /* Set the upar bit */
277 if (intel_crtc->config->lane_count == 1)
278 data = 0x0;
279 else
280 data = (i == 1) ? 0x0 : 0x1;
281 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
282 data << DPIO_UPAR_SHIFT);
283 }
284
285 /* Data lane stagger programming */
286 if (intel_crtc->config->port_clock > 270000)
287 stagger = 0x18;
288 else if (intel_crtc->config->port_clock > 135000)
289 stagger = 0xd;
290 else if (intel_crtc->config->port_clock > 67500)
291 stagger = 0x7;
292 else if (intel_crtc->config->port_clock > 33750)
293 stagger = 0x4;
294 else
295 stagger = 0x2;
296
297 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
298 val |= DPIO_TX2_STAGGER_MASK(0x1f);
299 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
300
301 if (intel_crtc->config->lane_count > 2) {
302 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
303 val |= DPIO_TX2_STAGGER_MASK(0x1f);
304 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
305 }
306
307 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
308 DPIO_LANESTAGGER_STRAP(stagger) |
309 DPIO_LANESTAGGER_STRAP_OVRD |
310 DPIO_TX1_STAGGER_MASK(0x1f) |
311 DPIO_TX1_STAGGER_MULT(6) |
312 DPIO_TX2_STAGGER_MULT(0));
313
314 if (intel_crtc->config->lane_count > 2) {
315 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
316 DPIO_LANESTAGGER_STRAP(stagger) |
317 DPIO_LANESTAGGER_STRAP_OVRD |
318 DPIO_TX1_STAGGER_MASK(0x1f) |
319 DPIO_TX1_STAGGER_MULT(7) |
320 DPIO_TX2_STAGGER_MULT(5));
321 }
322
323 /* Deassert data lane reset */
324 chv_data_lane_soft_reset(encoder, false);
325
326 mutex_unlock(&dev_priv->sb_lock);
327}
328
329void chv_phy_release_cl2_override(struct intel_encoder *encoder)
330{
331 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
332 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
333
334 if (dport->release_cl2_override) {
335 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
336 dport->release_cl2_override = false;
337 }
338}
339
340void chv_phy_post_pll_disable(struct intel_encoder *encoder)
341{
342 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
343 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
344 u32 val;
345
346 mutex_lock(&dev_priv->sb_lock);
347
348 /* disable left/right clock distribution */
349 if (pipe != PIPE_B) {
350 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
351 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
352 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
353 } else {
354 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
355 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
356 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
357 }
358
359 mutex_unlock(&dev_priv->sb_lock);
360
361 /*
362 * Leave the power down bit cleared for at least one
363 * lane so that chv_powergate_phy_ch() will power
364 * on something when the channel is otherwise unused.
365 * When the port is off and the override is removed
366 * the lanes power down anyway, so otherwise it doesn't
367 * really matter what the state of power down bits is
368 * after this.
369 */
370 chv_phy_powergate_lanes(encoder, false, 0x0);
371}
372
373void vlv_set_phy_signal_level(struct intel_encoder *encoder,
374 u32 demph_reg_value, u32 preemph_reg_value,
375 u32 uniqtranscale_reg_value, u32 tx3_demph)
376{
377 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
378 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
379 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
380 enum dpio_channel port = vlv_dport_to_channel(dport);
381 int pipe = intel_crtc->pipe;
382
383 mutex_lock(&dev_priv->sb_lock);
384 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
385 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
386 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
387 uniqtranscale_reg_value);
388 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
389
390 if (tx3_demph)
391 vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
392
393 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
394 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
395 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
396 mutex_unlock(&dev_priv->sb_lock);
397}
398
399void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
400{
401 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
402 struct drm_device *dev = encoder->base.dev;
403 struct drm_i915_private *dev_priv = dev->dev_private;
404 struct intel_crtc *intel_crtc =
405 to_intel_crtc(encoder->base.crtc);
406 enum dpio_channel port = vlv_dport_to_channel(dport);
407 int pipe = intel_crtc->pipe;
408
409 /* Program Tx lane resets to default */
410 mutex_lock(&dev_priv->sb_lock);
411 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
412 DPIO_PCS_TX_LANE2_RESET |
413 DPIO_PCS_TX_LANE1_RESET);
414 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
415 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
416 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
417 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
418 DPIO_PCS_CLK_SOFT_RESET);
419
420 /* Fix up inter-pair skew failure */
421 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
422 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
423 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
424 mutex_unlock(&dev_priv->sb_lock);
425}
426
427void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
428{
429 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
430 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
431 struct drm_device *dev = encoder->base.dev;
432 struct drm_i915_private *dev_priv = dev->dev_private;
433 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
434 enum dpio_channel port = vlv_dport_to_channel(dport);
435 int pipe = intel_crtc->pipe;
436 u32 val;
437
438 mutex_lock(&dev_priv->sb_lock);
439
440 /* Enable clock channels for this port */
441 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
442 val = 0;
443 if (pipe)
444 val |= (1<<21);
445 else
446 val &= ~(1<<21);
447 val |= 0x001000c4;
448 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
449
450 /* Program lane clock */
451 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
452 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
453
454 mutex_unlock(&dev_priv->sb_lock);
455}
456
457void vlv_phy_reset_lanes(struct intel_encoder *encoder)
458{
459 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
460 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
461 struct intel_crtc *intel_crtc =
462 to_intel_crtc(encoder->base.crtc);
463 enum dpio_channel port = vlv_dport_to_channel(dport);
464 int pipe = intel_crtc->pipe;
465
466 mutex_lock(&dev_priv->sb_lock);
467 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
468 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
469 mutex_unlock(&dev_priv->sb_lock);
470}
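
One detail worth restating from chv_phy_pre_encoder_enable() above is the data-lane stagger ladder. The following self-contained sketch copies the thresholds and values straight from the diff; the helper name itself is hypothetical:

	static int chv_stagger_for_port_clock(int port_clock)
	{
		if (port_clock > 270000)
			return 0x18;
		else if (port_clock > 135000)
			return 0xd;
		else if (port_clock > 67500)
			return 0x7;
		else if (port_clock > 33750)
			return 0x4;
		else
			return 0x2;
	}
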
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 3ac705936b04..c0eff1571731 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
208 if (memcmp(&crtc_state->dpll_hw_state, 208 if (memcmp(&crtc_state->dpll_hw_state,
209 &shared_dpll[i].hw_state, 209 &shared_dpll[i].hw_state,
210 sizeof(crtc_state->dpll_hw_state)) == 0) { 210 sizeof(crtc_state->dpll_hw_state)) == 0) {
211 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n", 211 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
212 crtc->base.base.id, pll->name, 212 crtc->base.base.id, crtc->base.name, pll->name,
213 shared_dpll[i].crtc_mask, 213 shared_dpll[i].crtc_mask,
214 pll->active_mask); 214 pll->active_mask);
215 return pll; 215 return pll;
@@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
220 for (i = range_min; i <= range_max; i++) { 220 for (i = range_min; i <= range_max; i++) {
221 pll = &dev_priv->shared_dplls[i]; 221 pll = &dev_priv->shared_dplls[i];
222 if (shared_dpll[i].crtc_mask == 0) { 222 if (shared_dpll[i].crtc_mask == 0) {
223 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 223 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
224 crtc->base.base.id, pll->name); 224 crtc->base.base.id, crtc->base.name, pll->name);
225 return pll; 225 return pll;
226 } 226 }
227 } 227 }
@@ -358,14 +358,17 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
358 i = (enum intel_dpll_id) crtc->pipe; 358 i = (enum intel_dpll_id) crtc->pipe;
359 pll = &dev_priv->shared_dplls[i]; 359 pll = &dev_priv->shared_dplls[i];
360 360
361 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 361 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
362 crtc->base.base.id, pll->name); 362 crtc->base.base.id, crtc->base.name, pll->name);
363 } else { 363 } else {
364 pll = intel_find_shared_dpll(crtc, crtc_state, 364 pll = intel_find_shared_dpll(crtc, crtc_state,
365 DPLL_ID_PCH_PLL_A, 365 DPLL_ID_PCH_PLL_A,
366 DPLL_ID_PCH_PLL_B); 366 DPLL_ID_PCH_PLL_B);
367 } 367 }
368 368
369 if (!pll)
370 return NULL;
371
369 /* reference the pll */ 372 /* reference the pll */
370 intel_reference_shared_dpll(pll, crtc_state); 373 intel_reference_shared_dpll(pll, crtc_state);
371 374
@@ -1236,9 +1239,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1236 case 162000: 1239 case 162000:
1237 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); 1240 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1238 break; 1241 break;
1239 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1240 results in CDCLK change. Need to handle the change of CDCLK by
1241 disabling pipes and re-enabling them */
1242 case 108000: 1242 case 108000:
1243 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); 1243 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1244 break; 1244 break;
@@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1508 int clock = crtc_state->port_clock; 1508 int clock = crtc_state->port_clock;
1509 1509
1510 if (encoder->type == INTEL_OUTPUT_HDMI) { 1510 if (encoder->type == INTEL_OUTPUT_HDMI) {
1511 intel_clock_t best_clock; 1511 struct dpll best_clock;
1512 1512
1513 /* Calculate HDMI div */ 1513 /* Calculate HDMI div */
1514 /* 1514 /*
@@ -1613,8 +1613,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1613 i = (enum intel_dpll_id) intel_dig_port->port; 1613 i = (enum intel_dpll_id) intel_dig_port->port;
1614 pll = intel_get_shared_dpll_by_id(dev_priv, i); 1614 pll = intel_get_shared_dpll_by_id(dev_priv, i);
1615 1615
1616 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 1616 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1617 crtc->base.base.id, pll->name); 1617 crtc->base.base.id, crtc->base.name, pll->name);
1618 1618
1619 intel_reference_shared_dpll(pll, crtc_state); 1619 intel_reference_shared_dpll(pll, crtc_state);
1620 1620
@@ -1633,18 +1633,10 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1633static void intel_ddi_pll_init(struct drm_device *dev) 1633static void intel_ddi_pll_init(struct drm_device *dev)
1634{ 1634{
1635 struct drm_i915_private *dev_priv = dev->dev_private; 1635 struct drm_i915_private *dev_priv = dev->dev_private;
1636 uint32_t val = I915_READ(LCPLL_CTL); 1636
1637 1637 if (INTEL_GEN(dev_priv) < 9) {
1638 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 1638 uint32_t val = I915_READ(LCPLL_CTL);
1639 int cdclk_freq; 1639
1640
1641 cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
1642 dev_priv->skl_boot_cdclk = cdclk_freq;
1643 if (skl_sanitize_cdclk(dev_priv))
1644 DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
1645 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
1646 DRM_ERROR("LCPLL1 is disabled\n");
1647 } else if (!IS_BROXTON(dev_priv)) {
1648 /* 1640 /*
1649 * The LCPLL register should be turned on by the BIOS. For now 1641 * The LCPLL register should be turned on by the BIOS. For now
1650 * let's just check its state and print errors in case 1642 * let's just check its state and print errors in case
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a28b4aac1e02..270da8de0acf 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -266,7 +266,7 @@ struct intel_connector {
266 struct intel_dp *mst_port; 266 struct intel_dp *mst_port;
267}; 267};
268 268
269typedef struct dpll { 269struct dpll {
270 /* given values */ 270 /* given values */
271 int n; 271 int n;
272 int m1, m2; 272 int m1, m2;
@@ -276,7 +276,7 @@ typedef struct dpll {
276 int vco; 276 int vco;
277 int m; 277 int m;
278 int p; 278 int p;
279} intel_clock_t; 279};
280 280
281struct intel_atomic_state { 281struct intel_atomic_state {
282 struct drm_atomic_state base; 282 struct drm_atomic_state base;
@@ -291,17 +291,32 @@ struct intel_atomic_state {
291 291
292 bool dpll_set, modeset; 292 bool dpll_set, modeset;
293 293
294 /*
295 * Does this transaction change the pipes that are active? This mask
296 * tracks which CRTCs have changed their active state at the end of
297 * the transaction (not counting the temporary disable during modesets).
298 * This mask should only be non-zero when intel_state->modeset is true,
299 * but the converse is not necessarily true; simply changing a mode may
300 * not flip the final active status of any CRTCs.
301 */
302 unsigned int active_pipe_changes;
303
294 unsigned int active_crtcs; 304 unsigned int active_crtcs;
295 unsigned int min_pixclk[I915_MAX_PIPES]; 305 unsigned int min_pixclk[I915_MAX_PIPES];
296 306
307 /* SKL/KBL Only */
308 unsigned int cdclk_pll_vco;
309
297 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; 310 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
298 struct intel_wm_config wm_config;
299 311
300 /* 312 /*
301 * Current watermarks can't be trusted during hardware readout, so 313 * Current watermarks can't be trusted during hardware readout, so
302 * don't bother calculating intermediate watermarks. 314 * don't bother calculating intermediate watermarks.
303 */ 315 */
304 bool skip_intermediate_wm; 316 bool skip_intermediate_wm;
317
318 /* Gen9+ only */
319 struct skl_wm_values wm_results;
305}; 320};
306 321
307struct intel_plane_state { 322struct intel_plane_state {
@@ -405,6 +420,48 @@ struct skl_pipe_wm {
405 uint32_t linetime; 420 uint32_t linetime;
406}; 421};
407 422
423struct intel_crtc_wm_state {
424 union {
425 struct {
426 /*
427 * Intermediate watermarks; these can be
428 * programmed immediately since they satisfy
429 * both the current configuration we're
430 * switching away from and the new
431 * configuration we're switching to.
432 */
433 struct intel_pipe_wm intermediate;
434
435 /*
436 * Optimal watermarks, programmed post-vblank
437 * when this state is committed.
438 */
439 struct intel_pipe_wm optimal;
440 } ilk;
441
442 struct {
443 /* gen9+ only needs 1-step wm programming */
444 struct skl_pipe_wm optimal;
445
446 /* cached plane data rate */
447 unsigned plane_data_rate[I915_MAX_PLANES];
448 unsigned plane_y_data_rate[I915_MAX_PLANES];
449
450 /* minimum block allocation */
451 uint16_t minimum_blocks[I915_MAX_PLANES];
452 uint16_t minimum_y_blocks[I915_MAX_PLANES];
453 } skl;
454 };
455
456 /*
457 * Platforms with two-step watermark programming will need to
458 * update watermark programming post-vblank to switch from the
459 * safe intermediate watermarks to the optimal final
460 * watermarks.
461 */
462 bool need_postvbl_update;
463};
464
408struct intel_crtc_state { 465struct intel_crtc_state {
409 struct drm_crtc_state base; 466 struct drm_crtc_state base;
410 467
@@ -558,32 +615,7 @@ struct intel_crtc_state {
558 /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ 615 /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
559 bool disable_lp_wm; 616 bool disable_lp_wm;
560 617
561 struct { 618 struct intel_crtc_wm_state wm;
562 /*
563 * Optimal watermarks, programmed post-vblank when this state
564 * is committed.
565 */
566 union {
567 struct intel_pipe_wm ilk;
568 struct skl_pipe_wm skl;
569 } optimal;
570
571 /*
572 * Intermediate watermarks; these can be programmed immediately
573 * since they satisfy both the current configuration we're
574 * switching away from and the new configuration we're switching
575 * to.
576 */
577 struct intel_pipe_wm intermediate;
578
579 /*
580 * Platforms with two-step watermark programming will need to
581 * update watermark programming post-vblank to switch from the
582 * safe intermediate watermarks to the optimal final
583 * watermarks.
584 */
585 bool need_postvbl_update;
586 } wm;
587 619
588 /* Gamma mode programmed on the pipe */ 620 /* Gamma mode programmed on the pipe */
589 uint32_t gamma_mode; 621 uint32_t gamma_mode;
@@ -598,14 +630,6 @@ struct vlv_wm_state {
598 bool cxsr; 630 bool cxsr;
599}; 631};
600 632
601struct intel_mmio_flip {
602 struct work_struct work;
603 struct drm_i915_private *i915;
604 struct drm_i915_gem_request *req;
605 struct intel_crtc *crtc;
606 unsigned int rotation;
607};
608
609struct intel_crtc { 633struct intel_crtc {
610 struct drm_crtc base; 634 struct drm_crtc base;
611 enum pipe pipe; 635 enum pipe pipe;
@@ -620,7 +644,7 @@ struct intel_crtc {
620 unsigned long enabled_power_domains; 644 unsigned long enabled_power_domains;
621 bool lowfreq_avail; 645 bool lowfreq_avail;
622 struct intel_overlay *overlay; 646 struct intel_overlay *overlay;
623 struct intel_unpin_work *unpin_work; 647 struct intel_flip_work *flip_work;
624 648
625 atomic_t unpin_work_count; 649 atomic_t unpin_work_count;
626 650
@@ -815,6 +839,7 @@ struct intel_dp {
815 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 839 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
816 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; 840 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
817 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 841 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
842 uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
818 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ 843 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
819 uint8_t num_sink_rates; 844 uint8_t num_sink_rates;
820 int sink_rates[DP_MAX_SUPPORTED_RATES]; 845 int sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -947,22 +972,21 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
947 return dev_priv->plane_to_crtc_mapping[plane]; 972 return dev_priv->plane_to_crtc_mapping[plane];
948} 973}
949 974
950struct intel_unpin_work { 975struct intel_flip_work {
951 struct work_struct work; 976 struct work_struct unpin_work;
977 struct work_struct mmio_work;
978
952 struct drm_crtc *crtc; 979 struct drm_crtc *crtc;
953 struct drm_framebuffer *old_fb; 980 struct drm_framebuffer *old_fb;
954 struct drm_i915_gem_object *pending_flip_obj; 981 struct drm_i915_gem_object *pending_flip_obj;
955 struct drm_pending_vblank_event *event; 982 struct drm_pending_vblank_event *event;
956 atomic_t pending; 983 atomic_t pending;
957#define INTEL_FLIP_INACTIVE 0
958#define INTEL_FLIP_PENDING 1
959#define INTEL_FLIP_COMPLETE 2
960 u32 flip_count; 984 u32 flip_count;
961 u32 gtt_offset; 985 u32 gtt_offset;
962 struct drm_i915_gem_request *flip_queued_req; 986 struct drm_i915_gem_request *flip_queued_req;
963 u32 flip_queued_vblank; 987 u32 flip_queued_vblank;
964 u32 flip_ready_vblank; 988 u32 flip_ready_vblank;
965 bool enable_stall_check; 989 unsigned int rotation;
966}; 990};
967 991
968struct intel_load_detect_pipe { 992struct intel_load_detect_pipe {
@@ -1031,9 +1055,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1031void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 1055void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1032void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 1056void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1033void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 1057void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1034void gen6_reset_rps_interrupts(struct drm_device *dev); 1058void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
1035void gen6_enable_rps_interrupts(struct drm_device *dev); 1059void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
1036void gen6_disable_rps_interrupts(struct drm_device *dev); 1060void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
1037u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); 1061u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
1038void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); 1062void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
1039void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); 1063void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
@@ -1112,14 +1136,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
1112void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); 1136void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
1113 1137
1114/* intel_display.c */ 1138/* intel_display.c */
1139void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
1140void intel_update_rawclk(struct drm_i915_private *dev_priv);
1115int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 1141int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
1116 const char *name, u32 reg, int ref_freq); 1142 const char *name, u32 reg, int ref_freq);
1117extern const struct drm_plane_funcs intel_plane_funcs; 1143extern const struct drm_plane_funcs intel_plane_funcs;
1118void intel_init_display_hooks(struct drm_i915_private *dev_priv); 1144void intel_init_display_hooks(struct drm_i915_private *dev_priv);
1119unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); 1145unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
1120bool intel_has_pending_fb_unpin(struct drm_device *dev); 1146bool intel_has_pending_fb_unpin(struct drm_device *dev);
1121void intel_mark_busy(struct drm_device *dev); 1147void intel_mark_busy(struct drm_i915_private *dev_priv);
1122void intel_mark_idle(struct drm_device *dev); 1148void intel_mark_idle(struct drm_i915_private *dev_priv);
1123void intel_crtc_restore_mode(struct drm_crtc *crtc); 1149void intel_crtc_restore_mode(struct drm_crtc *crtc);
1124int intel_display_suspend(struct drm_device *dev); 1150int intel_display_suspend(struct drm_device *dev);
1125void intel_encoder_destroy(struct drm_encoder *encoder); 1151void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1128,7 +1154,6 @@ struct intel_connector *intel_connector_alloc(void);
1128bool intel_connector_get_hw_state(struct intel_connector *connector); 1154bool intel_connector_get_hw_state(struct intel_connector *connector);
1129void intel_connector_attach_encoder(struct intel_connector *connector, 1155void intel_connector_attach_encoder(struct intel_connector *connector,
1130 struct intel_encoder *encoder); 1156 struct intel_encoder *encoder);
1131struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
1132struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 1157struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
1133 struct drm_crtc *crtc); 1158 struct drm_crtc *crtc);
1134enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); 1159enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
@@ -1151,6 +1176,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
1151 if (crtc->active) 1176 if (crtc->active)
1152 intel_wait_for_vblank(dev, pipe); 1177 intel_wait_for_vblank(dev, pipe);
1153} 1178}
1179
1180u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
1181
1154int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 1182int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
1155void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1183void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1156 struct intel_digital_port *dport, 1184 struct intel_digital_port *dport,
@@ -1164,14 +1192,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct drm_modeset_acquire_ctx *ctx);
 int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 			       unsigned int rotation);
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
 			   struct drm_i915_gem_object *obj);
-void intel_prepare_page_flip(struct drm_device *dev, int plane);
-void intel_finish_page_flip(struct drm_device *dev, int pipe);
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-void intel_check_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
 int intel_prepare_plane_fb(struct drm_plane *plane,
 			   const struct drm_plane_state *new_state);
 void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1228,13 +1256,12 @@ u32 intel_compute_tile_offset(int *x, int *y,
 			      const struct drm_framebuffer *fb, int plane,
 			      unsigned int pitch,
 			      unsigned int rotation);
-void intel_prepare_reset(struct drm_device *dev);
-void intel_finish_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void broxton_init_cdclk(struct drm_i915_private *dev_priv);
 void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
 void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
 void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
 void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
@@ -1243,8 +1270,8 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
 void gen9_enable_dc5(struct drm_i915_private *dev_priv);
 void skl_init_cdclk(struct drm_i915_private *dev_priv);
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+unsigned int skl_cdclk_get_vco(unsigned int freq);
 void skl_enable_dc6(struct drm_i915_private *dev_priv);
 void skl_disable_dc6(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -1252,8 +1279,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
-			intel_clock_t *best_clock);
-int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+			struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
 
 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
@@ -1284,7 +1311,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
 void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 			     struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1339,12 +1366,22 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
 
+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+	return ~((1 << lane_count) - 1) & 0xf;
+}
+
+/* intel_dp_aux_backlight.c */
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
+
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
 /* intel_dsi.c */
 void intel_dsi_init(struct drm_device *dev);
 
+/* intel_dsi_dcs_backlight.c */
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
 
 /* intel_dvo.c */
 void intel_dvo_init(struct drm_device *dev);
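
A note on the intel_dp_unused_lane_mask() helper added above: DP ports carry at
most four lanes (bits 0-3), so the complement of the used-lane mask is simply
truncated to the low nibble. A minimal standalone sketch of its behaviour (the
demo harness below is hypothetical and not part of the patch):

	#include <assert.h>

	/* Mirrors the inline helper added above. */
	static unsigned int unused_lane_mask(int lane_count)
	{
		return ~((1 << lane_count) - 1) & 0xf;
	}

	int main(void)
	{
		assert(unused_lane_mask(1) == 0xe);	/* lanes 1-3 idle */
		assert(unused_lane_mask(2) == 0xc);	/* lanes 2-3 idle */
		assert(unused_lane_mask(4) == 0x0);	/* all four lanes in use */
		return 0;
	}
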
@@ -1424,13 +1461,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
 
 
 /* intel_overlay.c */
-void intel_setup_overlay(struct drm_device *dev);
-void intel_cleanup_overlay(struct drm_device *dev);
+void intel_setup_overlay(struct drm_i915_private *dev_priv);
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
 int intel_overlay_switch_off(struct intel_overlay *overlay);
-int intel_overlay_put_image(struct drm_device *dev, void *data,
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
-int intel_overlay_attrs(struct drm_device *dev, void *data,
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void intel_overlay_reset(struct drm_i915_private *dev_priv);
 
1436 1473
@@ -1601,21 +1638,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
 void intel_pm_setup(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_device *dev);
-void intel_cleanup_gt_powersave(struct drm_device *dev);
-void intel_enable_gt_powersave(struct drm_device *dev);
-void intel_disable_gt_powersave(struct drm_device *dev);
-void intel_suspend_gt_powersave(struct drm_device *dev);
-void intel_reset_gt_powersave(struct drm_device *dev);
-void gen6_update_ring_freq(struct drm_device *dev);
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv,
 		    struct intel_rps_client *rps,
 		    unsigned long submitted);
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
-				       struct drm_i915_gem_request *req);
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
 void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
@@ -1623,7 +1659,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
+static inline int intel_enable_rc6(void)
+{
+	return i915.enable_rc6;
+}
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev,
@@ -1635,7 +1675,7 @@ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_device *dev);
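
A pattern worth noting before the next file: much of the signature churn in this
header is the ongoing conversion from struct drm_device * parameters to
struct drm_i915_private *, so callees stop re-deriving the private pointer on
every call. A hedged sketch of the caller-side idiom (the helper name below is
illustrative; in this era the private pointer lived in dev->dev_private):

	/* Illustrative only: convert once at the boundary, then pass
	 * dev_priv down through the new-style prototypes above. */
	static inline struct drm_i915_private *example_to_i915(struct drm_device *dev)
	{
		return dev->dev_private;
	}

	static void example_caller(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = example_to_i915(dev);

		intel_mark_busy(dev_priv);	/* new signature from this patch */
		intel_mark_idle(dev_priv);
	}
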
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 366ad6c67ce4..a2ead5eac336 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -532,7 +532,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	enum port port;
-	u32 tmp;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -551,11 +550,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 
 	msleep(intel_dsi->panel_on_delay);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		u32 val;
+
 		/* Disable DPOunit clock gating, can stall pipe */
-		tmp = I915_READ(DSPCLK_GATE_D);
-		tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
-		I915_WRITE(DSPCLK_GATE_D, tmp);
+		val = I915_READ(DSPCLK_GATE_D);
+		val |= DPOUNIT_CLOCK_GATE_DISABLE;
+		I915_WRITE(DSPCLK_GATE_D, val);
 	}
 
 	/* put device in ready state */
@@ -693,7 +694,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
 
 	intel_dsi_clear_device_ready(encoder);
 
-	if (!IS_BROXTON(dev_priv)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 val;
 
 		val = I915_READ(DSPCLK_GATE_D);
@@ -1378,7 +1379,6 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
 static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
 	.get_modes = intel_dsi_get_modes,
 	.mode_valid = intel_dsi_mode_valid,
-	.best_encoder = intel_best_encoder,
 };
 
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
@@ -1449,7 +1449,7 @@ void intel_dsi_init(struct drm_device *dev)
 	connector = &intel_connector->base;
 
 	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
-			 NULL);
+			 "DSI %c", port_name(port));
 
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1473,10 +1473,42 @@ void intel_dsi_init(struct drm_device *dev)
 	else
 		intel_encoder->crtc_mask = BIT(PIPE_B);
 
-	if (dev_priv->vbt.dsi.config->dual_link)
+	if (dev_priv->vbt.dsi.config->dual_link) {
 		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
-	else
+
+		switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+		case DL_DCS_PORT_A:
+			intel_dsi->dcs_backlight_ports = BIT(PORT_A);
+			break;
+		case DL_DCS_PORT_C:
+			intel_dsi->dcs_backlight_ports = BIT(PORT_C);
+			break;
+		default:
+		case DL_DCS_PORT_A_AND_C:
+			intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
+			break;
+		}
+
+		switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+		case DL_DCS_PORT_A:
+			intel_dsi->dcs_cabc_ports = BIT(PORT_A);
+			break;
+		case DL_DCS_PORT_C:
+			intel_dsi->dcs_cabc_ports = BIT(PORT_C);
+			break;
+		default:
+		case DL_DCS_PORT_A_AND_C:
+			intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
+			break;
+		}
+	} else {
 		intel_dsi->ports = BIT(port);
+		intel_dsi->dcs_backlight_ports = BIT(port);
+		intel_dsi->dcs_cabc_ports = BIT(port);
+	}
+
+	if (!dev_priv->vbt.dsi.config->cabc_supported)
+		intel_dsi->dcs_cabc_ports = 0;
 
 	/* Create a DSI host (and a device) for each port. */
 	for_each_dsi_port(port, intel_dsi->ports) {
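
The dual-link branch above turns two VBT fields into per-port bitmasks, with
"both ports" doubling as the fallback for unknown values. A condensed
restatement of that mapping (names are from the patch; the standalone helper
form is illustrative):

	/* Condensed restatement of the switch logic above: a VBT enum selects
	 * which DSI port(s) receive DCS backlight/CABC commands on dual-link
	 * panels; DL_DCS_PORT_A_AND_C is also the default for unknown values. */
	static u16 dl_dcs_ports_to_mask(u8 vbt_sel)
	{
		switch (vbt_sel) {
		case DL_DCS_PORT_A:
			return BIT(PORT_A);
		case DL_DCS_PORT_C:
			return BIT(PORT_C);
		case DL_DCS_PORT_A_AND_C:
		default:
			return BIT(PORT_A) | BIT(PORT_C);
		}
	}
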
@@ -1545,6 +1577,9 @@ void intel_dsi_init(struct drm_device *dev)
 		goto err;
 	}
 
+	connector->display_info.width_mm = fixed_mode->width_mm;
+	connector->display_info.height_mm = fixed_mode->height_mm;
+
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 
 	intel_dsi_add_properties(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 61a6957fc6c2..5967ea6d6045 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -78,6 +78,10 @@ struct intel_dsi {
 
 	u8 escape_clk_div;
 	u8 dual_link;
+
+	u16 dcs_backlight_ports;
+	u16 dcs_cabc_ports;
+
 	u8 pixel_overlap;
 	u32 port_bits;
 	u32 bw_timer;
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
new file mode 100644
index 000000000000..f0dc427743f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Deepak M <m.deepak at intel.com>
+ */
+
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "i915_drv.h"
+#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define CONTROL_DISPLAY_BCTRL		(1 << 5)
+#define CONTROL_DISPLAY_DD		(1 << 3)
+#define CONTROL_DISPLAY_BL		(1 << 2)
+
+#define POWER_SAVE_OFF			(0 << 0)
+#define POWER_SAVE_LOW			(1 << 0)
+#define POWER_SAVE_MEDIUM		(2 << 0)
+#define POWER_SAVE_HIGH			(3 << 0)
+#define POWER_SAVE_OUTDOOR_MODE		(4 << 0)
+
+#define PANEL_PWM_MAX_VALUE		0xFF
+
+static u32 dcs_get_backlight(struct intel_connector *connector)
+{
+	struct intel_encoder *encoder = connector->encoder;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct mipi_dsi_device *dsi_device;
+	u8 data;
+	enum port port;
+
+	/* FIXME: Need to take care of 16 bit brightness level */
+	for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+		dsi_device = intel_dsi->dsi_hosts[port]->device;
+		mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+				  &data, sizeof(data));
+		break;
+	}
+
+	return data;
+}
+
+static void dcs_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct intel_encoder *encoder = connector->encoder;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct mipi_dsi_device *dsi_device;
+	u8 data = level;
+	enum port port;
+
+	/* FIXME: Need to take care of 16 bit brightness level */
+	for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+		dsi_device = intel_dsi->dsi_hosts[port]->device;
+		mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+				   &data, sizeof(data));
+	}
+}
+
+static void dcs_disable_backlight(struct intel_connector *connector)
+{
+	struct intel_encoder *encoder = connector->encoder;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct mipi_dsi_device *dsi_device;
+	enum port port;
+
+	dcs_set_backlight(connector, 0);
+
+	for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+		u8 cabc = POWER_SAVE_OFF;
+
+		dsi_device = intel_dsi->dsi_hosts[port]->device;
+		mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+				   &cabc, sizeof(cabc));
+	}
+
+	for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+		u8 ctrl = 0;
+
+		dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+		mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+				  &ctrl, sizeof(ctrl));
+
+		ctrl &= ~CONTROL_DISPLAY_BL;
+		ctrl &= ~CONTROL_DISPLAY_DD;
+		ctrl &= ~CONTROL_DISPLAY_BCTRL;
+
+		mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+				   &ctrl, sizeof(ctrl));
+	}
+}
+
+static void dcs_enable_backlight(struct intel_connector *connector)
+{
+	struct intel_encoder *encoder = connector->encoder;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	struct intel_panel *panel = &connector->panel;
+	struct mipi_dsi_device *dsi_device;
+	enum port port;
+
+	for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+		u8 ctrl = 0;
+
+		dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+		mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+				  &ctrl, sizeof(ctrl));
+
+		ctrl |= CONTROL_DISPLAY_BL;
+		ctrl |= CONTROL_DISPLAY_DD;
+		ctrl |= CONTROL_DISPLAY_BCTRL;
+
+		mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+				   &ctrl, sizeof(ctrl));
+	}
+
+	for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+		u8 cabc = POWER_SAVE_MEDIUM;
+
+		dsi_device = intel_dsi->dsi_hosts[port]->device;
+		mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+				   &cabc, sizeof(cabc));
+	}
+
+	dcs_set_backlight(connector, panel->backlight.level);
+}
+
+static int dcs_setup_backlight(struct intel_connector *connector,
+			       enum pipe unused)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	panel->backlight.max = PANEL_PWM_MAX_VALUE;
+	panel->backlight.level = PANEL_PWM_MAX_VALUE;
+
+	return 0;
+}
+
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+	struct drm_device *dev = intel_connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder = intel_connector->encoder;
+	struct intel_panel *panel = &intel_connector->panel;
+
+	if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+		return -ENODEV;
+
+	if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
+		return -EINVAL;
+
+	panel->backlight.setup = dcs_setup_backlight;
+	panel->backlight.enable = dcs_enable_backlight;
+	panel->backlight.disable = dcs_disable_backlight;
+	panel->backlight.set = dcs_set_backlight;
+	panel->backlight.get = dcs_get_backlight;
+
+	return 0;
+}
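
The new file carries two FIXMEs about 16-bit brightness. For panels that report
a two-byte level, the same DCS opcodes take a two-byte payload; a hedged sketch
of what the write side might look like (the helper is hypothetical and not in
the patch, and MSB-first byte order is an assumption about the panel here):

	/* Hypothetical 16-bit variant of dcs_set_backlight(). */
	static void dcs_set_backlight_16(struct mipi_dsi_device *dsi_device,
					 u16 level)
	{
		u8 data[2] = { level >> 8, level & 0xff };	/* assumed MSB first */

		mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
				   data, sizeof(data));
	}
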
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index e498f1c3221e..f122484bedfc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = {
 	{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
 };
 
+#define CHV_GPIO_IDX_START_N		0
+#define CHV_GPIO_IDX_START_E		73
+#define CHV_GPIO_IDX_START_SW		100
+#define CHV_GPIO_IDX_START_SE		198
+
+#define CHV_VBT_MAX_PINS_PER_FMLY	15
+
+#define CHV_GPIO_PAD_CFG0(f, i)		(0x4400 + (f) * 0x400 + (i) * 8)
+#define CHV_GPIO_GPIOEN			(1 << 15)
+#define CHV_GPIO_GPIOCFG_GPIO		(0 << 8)
+#define CHV_GPIO_GPIOCFG_GPO		(1 << 8)
+#define CHV_GPIO_GPIOCFG_GPI		(2 << 8)
+#define CHV_GPIO_GPIOCFG_HIZ		(3 << 8)
+#define CHV_GPIO_GPIOTXSTATE(state)	((!!(state)) << 1)
+
+#define CHV_GPIO_PAD_CFG1(f, i)		(0x4400 + (f) * 0x400 + (i) * 8 + 4)
+#define CHV_GPIO_CFGLOCK		(1 << 31)
+
 static inline enum port intel_dsi_seq_port_to_port(u8 port)
 {
 	return port ? PORT_C : PORT_A;
@@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
 	map = &vlv_gpio_table[gpio_index];
 
 	if (dev_priv->vbt.dsi.seq_version >= 3) {
-		DRM_DEBUG_KMS("GPIO element v3 not supported\n");
-		return;
+		/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+		port = IOSF_PORT_GPIO_NC;
 	} else {
 		if (gpio_source == 0) {
 			port = IOSF_PORT_GPIO_NC;
 		} else if (gpio_source == 1) {
-			port = IOSF_PORT_GPIO_SC;
+			DRM_DEBUG_KMS("SC gpio not supported\n");
+			return;
 		} else {
 			DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
 			return;
@@ -231,6 +250,56 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
+static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+			  u8 gpio_source, u8 gpio_index, bool value)
+{
+	u16 cfg0, cfg1;
+	u16 family_num;
+	u8 port;
+
+	if (dev_priv->vbt.dsi.seq_version >= 3) {
+		if (gpio_index >= CHV_GPIO_IDX_START_SE) {
+			/* XXX: it's unclear whether 255->57 is part of SE. */
+			gpio_index -= CHV_GPIO_IDX_START_SE;
+			port = CHV_IOSF_PORT_GPIO_SE;
+		} else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
+			gpio_index -= CHV_GPIO_IDX_START_SW;
+			port = CHV_IOSF_PORT_GPIO_SW;
+		} else if (gpio_index >= CHV_GPIO_IDX_START_E) {
+			gpio_index -= CHV_GPIO_IDX_START_E;
+			port = CHV_IOSF_PORT_GPIO_E;
+		} else {
+			port = CHV_IOSF_PORT_GPIO_N;
+		}
+	} else {
+		/* XXX: The spec is unclear about CHV GPIO on seq v2 */
+		if (gpio_source != 0) {
+			DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+			return;
+		}
+
+		if (gpio_index >= CHV_GPIO_IDX_START_E) {
+			DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
+				      gpio_index);
+			return;
+		}
+
+		port = CHV_IOSF_PORT_GPIO_N;
+	}
+
+	family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
+	gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
+
+	cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
+	cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
+
+	mutex_lock(&dev_priv->sb_lock);
+	vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
+	vlv_iosf_sb_write(dev_priv, port, cfg0,
+			  CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
@@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 
 	if (IS_VALLEYVIEW(dev_priv))
 		vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+	else if (IS_CHERRYVIEW(dev_priv))
+		chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
 	else
 		DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
 
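
The CHV pad-config macros introduced above encode community-relative
addressing: the VBT index picks a community by range, then a family of 15 pads,
then an 8-byte pad stride. A standalone sketch of the arithmetic for one
hypothetical index (the demo harness is not part of the patch):

	#include <stdio.h>

	#define CHV_GPIO_IDX_START_SW		100
	#define CHV_VBT_MAX_PINS_PER_FMLY	15
	#define CHV_GPIO_PAD_CFG0(f, i)		(0x4400 + (f) * 0x400 + (i) * 8)
	#define CHV_GPIO_PAD_CFG1(f, i)		(0x4400 + (f) * 0x400 + (i) * 8 + 4)

	int main(void)
	{
		/* Hypothetical VBT index 110: >= 100, so it lands in SW. */
		unsigned int idx = 110 - CHV_GPIO_IDX_START_SW;		/* 10 */
		unsigned int family = idx / CHV_VBT_MAX_PINS_PER_FMLY;	/* 0 */
		unsigned int pad = idx % CHV_VBT_MAX_PINS_PER_FMLY;	/* 10 */

		printf("cfg0=0x%x cfg1=0x%x\n",
		       CHV_GPIO_PAD_CFG0(family, pad),	/* 0x4450 */
		       CHV_GPIO_PAD_CFG1(family, pad));	/* 0x4454 */
		return 0;
	}
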
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 286baec979c8..c86f88ed92fd 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -351,7 +351,6 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
 	.mode_valid = intel_dvo_mode_valid,
 	.get_modes = intel_dvo_get_modes,
-	.best_encoder = intel_best_encoder,
 };
 
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -406,6 +405,18 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
 	return mode;
 }
 
+static char intel_dvo_port_name(i915_reg_t dvo_reg)
+{
+	if (i915_mmio_reg_equal(dvo_reg, DVOA))
+		return 'A';
+	else if (i915_mmio_reg_equal(dvo_reg, DVOB))
+		return 'B';
+	else if (i915_mmio_reg_equal(dvo_reg, DVOC))
+		return 'C';
+	else
+		return '?';
+}
+
 void intel_dvo_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -428,8 +439,6 @@ void intel_dvo_init(struct drm_device *dev)
 	intel_dvo->attached_connector = intel_connector;
 
 	intel_encoder = &intel_dvo->base;
-	drm_encoder_init(dev, &intel_encoder->base,
-			 &intel_dvo_enc_funcs, encoder_type, NULL);
 
 	intel_encoder->disable = intel_disable_dvo;
 	intel_encoder->enable = intel_enable_dvo;
@@ -496,6 +505,10 @@ void intel_dvo_init(struct drm_device *dev)
 		if (!dvoinit)
 			continue;
 
+		drm_encoder_init(dev, &intel_encoder->base,
+				 &intel_dvo_enc_funcs, encoder_type,
+				 "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+
 		intel_encoder->type = INTEL_OUTPUT_DVO;
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 		switch (dvo->type) {
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index d5a7cfec589b..45ee07b888a0 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -374,8 +374,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
  * @dev_priv: i915 device instance
  *
  * This function is used to verify the current state of FBC.
+ *
  * FIXME: This should be tracked in the plane config eventually
  * instead of queried at runtime for most callers.
  */
 bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
 {
@@ -740,7 +741,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
 
 	/* FIXME: We lack the proper locking here, so only run this on the
 	 * platforms that need. */
-	if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
+	if (IS_GEN(dev_priv, 5, 6))
 		cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
 	cache->fb.pixel_format = fb->pixel_format;
 	cache->fb.stride = fb->pitches[0];
@@ -827,7 +828,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
 	bool enable_by_default = IS_HASWELL(dev_priv) ||
 				 IS_BROADWELL(dev_priv);
 
-	if (intel_vgpu_active(dev_priv->dev)) {
+	if (intel_vgpu_active(dev_priv)) {
 		fbc->no_fbc_reason = "VGPU is active";
 		return false;
 	}
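
The FBC hunk above replaces an open-coded gen >= 5 && gen < 7 test with
IS_GEN(dev_priv, 5, 6), whose bounds are inclusive. The real macro's internals
are likely a bitmask test and may differ from this sketch; the semantics,
however, are just an inclusive range check:

	#include <assert.h>

	/* Illustrative inclusive-range check matching IS_GEN()'s semantics. */
	static int is_gen_range(int gen, int start, int end)
	{
		return gen >= start && gen <= end;
	}

	int main(void)
	{
		/* Equivalent to the removed 'gen >= 5 && gen < 7' test. */
		assert(is_gen_range(5, 5, 6));
		assert(is_gen_range(6, 5, 6));
		assert(!is_gen_range(7, 5, 6));
		return 0;
	}
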
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ab8d09a81f14..4c725ad6fb54 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	if (size * 2 < ggtt->stolen_usable_size)
 		obj = i915_gem_object_create_stolen(dev, size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, size);
-	if (!obj) {
+		obj = i915_gem_object_create(dev, size);
+	if (IS_ERR(obj)) {
 		DRM_ERROR("failed to allocate framebuffer\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(obj);
 		goto out;
 	}
 
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
+	struct i915_vma *vma;
 	struct drm_i915_gem_object *obj;
-	int size, ret;
 	bool prealloc = false;
+	void *vaddr;
+	int ret;
 
 	if (intel_fb &&
 	    (sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	}
 
 	obj = intel_fb->obj;
-	size = obj->base.size;
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &intelfb_ops;
 
+	vma = i915_gem_obj_to_ggtt(obj);
+
 	/* setup aperture base/size for vesafb takeover */
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = ggtt->mappable_end;
 
-	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
-	info->fix.smem_len = size;
+	info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+	info->fix.smem_len = vma->node.size;
 
-	info->screen_base =
-		ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   size);
-	if (!info->screen_base) {
+	vaddr = i915_vma_pin_iomap(vma);
+	if (IS_ERR(vaddr)) {
 		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
-		ret = -ENOSPC;
+		ret = PTR_ERR(vaddr);
 		goto out_destroy_fbi;
 	}
-	info->screen_size = size;
+	info->screen_base = vaddr;
+	info->screen_size = vma->node.size;
 
 	/* This driver doesn't need a VT switch to restore the mode on resume */
 	info->skip_vt_switch = true;
@@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unpin:
-	i915_gem_object_ggtt_unpin(obj);
+	intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
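
Two conversions in this file follow the same shift from NULL-returning
allocators to error-pointer-returning ones (i915_gem_object_create() and
i915_vma_pin_iomap() above). The general caller shape, sketched standalone
with a hypothetical allocator:

	#include <linux/err.h>

	extern void *some_create(void);	/* hypothetical ERR_PTR-style allocator */

	/* Illustrative only: the failure reason travels inside the pointer
	 * itself, so the caller propagates the real errno instead of a
	 * blanket -ENOMEM or -ENOSPC. */
	static int use_allocator(void)
	{
		void *obj = some_create();

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		/* ... use obj ... */
		return 0;
	}
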
@@ -488,10 +490,10 @@ retry:
 	}
 	crtcs[i] = new_crtc;
 
-	DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
+	DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
 		      connector->name,
-		      pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
 		      connector->state->crtc->base.id,
+		      connector->state->crtc->name,
 		      modes[i]->hdisplay, modes[i]->vdisplay,
 		      modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
 
@@ -551,6 +553,11 @@ static void intel_fbdev_destroy(struct drm_device *dev,
 
 	if (ifbdev->fb) {
 		drm_framebuffer_unregister_private(&ifbdev->fb->base);
+
+		mutex_lock(&dev->struct_mutex);
+		intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+		mutex_unlock(&dev->struct_mutex);
+
 		drm_framebuffer_remove(&ifbdev->fb->base);
 	}
 }
@@ -717,8 +724,6 @@ int intel_fbdev_init(struct drm_device *dev)
 		return ret;
 	}
 
-	ifbdev->helper.atomic = true;
-
 	dev_priv->fbdev = ifbdev;
 	INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
 
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 9d79c4c3e256..41601c71f529 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -48,14 +48,23 @@ struct drm_i915_gem_request;
  * queue (a circular array of work items), again described in the process
  * descriptor. Work queue pages are mapped momentarily as required.
  *
- * Finally, we also keep a few statistics here, including the number of
- * submissions to each engine, and a record of the last submission failure
- * (if any).
+ * We also keep a few statistics on failures. Ideally, these should all
+ * be zero!
+ *   no_wq_space: times that the submission pre-check found no space was
+ *                available in the work queue (note, the queue is shared,
+ *                not per-engine). It is OK for this to be nonzero, but
+ *                it should not be huge!
+ *   q_fail: failed to enqueue a work item. This should never happen,
+ *           because we check for space beforehand.
+ *   b_fail: failed to ring the doorbell. This should never happen, unless
+ *           somehow the hardware misbehaves, or maybe if the GuC firmware
+ *           crashes? We probably need to reset the GPU to recover.
+ *   retcode: errno from last guc_submit()
  */
 struct i915_guc_client {
 	struct drm_i915_gem_object *client_obj;
 	void *client_base;		/* first page (only) of above */
-	struct intel_context *owner;
+	struct i915_gem_context *owner;
 	struct intel_guc *guc;
 	uint32_t priority;
 	uint32_t ctx_index;
@@ -71,12 +80,13 @@ struct i915_guc_client {
 	uint32_t wq_tail;
 	uint32_t unused;		/* Was 'wq_head' */
 
-	/* GuC submission statistics & status */
-	uint64_t submissions[GUC_MAX_ENGINES_NUM];
-	uint32_t q_fail;
+	uint32_t no_wq_space;
+	uint32_t q_fail;		/* No longer used */
 	uint32_t b_fail;
 	int retcode;
-	int spare;			/* pad to 32 DWords */
+
+	/* Per-engine counts of GuC submissions */
+	uint64_t submissions[GUC_MAX_ENGINES_NUM];
 };
 
 enum intel_guc_fw_status {
@@ -138,9 +148,9 @@ struct intel_guc {
 };
 
 /* intel_guc_loader.c */
-extern void intel_guc_ucode_init(struct drm_device *dev);
-extern int intel_guc_ucode_load(struct drm_device *dev);
-extern void intel_guc_ucode_fini(struct drm_device *dev);
+extern void intel_guc_init(struct drm_device *dev);
+extern int intel_guc_setup(struct drm_device *dev);
+extern void intel_guc_fini(struct drm_device *dev);
 extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
 extern int intel_guc_suspend(struct drm_device *dev);
 extern int intel_guc_resume(struct drm_device *dev);
@@ -148,10 +158,9 @@ extern int intel_guc_resume(struct drm_device *dev);
 /* i915_guc_submission.c */
 int i915_guc_submission_init(struct drm_device *dev);
 int i915_guc_submission_enable(struct drm_device *dev);
-int i915_guc_submit(struct i915_guc_client *client,
-		    struct drm_i915_gem_request *rq);
+int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
+int i915_guc_submit(struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_device *dev);
 void i915_guc_submission_fini(struct drm_device *dev);
-int i915_guc_wq_check_space(struct i915_guc_client *client);
 
 #endif
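
The reworked kerneldoc above turns the client statistics into failure
counters. A sketch of how no_wq_space would typically be driven by the
pre-check the comment names (everything except the counter's role is
hypothetical here; the real circular-buffer arithmetic is not in this header):

	#include <errno.h>

	/* Illustrative only: a space pre-check that accounts its failures in
	 * no_wq_space rather than letting the later enqueue fail (q_fail). */
	struct wq_stats_example {
		unsigned int no_wq_space;
	};

	static int wq_check_space_example(struct wq_stats_example *gc,
					  unsigned int free_space,
					  unsigned int wqi_size)
	{
		if (free_space >= wqi_size)
			return 0;

		gc->no_wq_space++;
		return -EAGAIN;
	}
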
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 2de57ffe5e18..944786d7075b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -71,7 +71,8 @@
 #define WQ_WORKLOAD_TOUCH		(2 << WQ_WORKLOAD_SHIFT)
 
 #define WQ_RING_TAIL_SHIFT		20
-#define WQ_RING_TAIL_MASK		(0x7FF << WQ_RING_TAIL_SHIFT)
+#define WQ_RING_TAIL_MAX		0x7FF	/* 2^11 QWords */
+#define WQ_RING_TAIL_MASK		(WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
 
 #define GUC_DOORBELL_ENABLED		1
 #define GUC_DOORBELL_DISABLED		0
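
Splitting WQ_RING_TAIL_MAX out of the mask makes the field's range checkable on
its own: the tail is an 11-bit QWord index packed at bit 20. A standalone
sketch of the pack/extract round trip (demo harness only, not in the patch):

	#include <assert.h>
	#include <stdint.h>

	#define WQ_RING_TAIL_SHIFT	20
	#define WQ_RING_TAIL_MAX	0x7FF
	#define WQ_RING_TAIL_MASK	(WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)

	int main(void)
	{
		uint32_t tail = 0x123;	/* caller must keep this <= MAX */

		assert(tail <= WQ_RING_TAIL_MAX);
		uint32_t field = (tail << WQ_RING_TAIL_SHIFT) & WQ_RING_TAIL_MASK;

		assert(((field & WQ_RING_TAIL_MASK) >> WQ_RING_TAIL_SHIFT) == tail);
		return 0;
	}
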
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 876e5da44c4e..f2b88c7209cb 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -59,9 +59,12 @@
  *
  */
 
-#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin"
+#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
 MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 
+#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
+MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
+
 /* User-friendly representation of an enum */
 const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 {
@@ -100,6 +103,7 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 	int irqs;
+	u32 tmp;
 
 	/* tell all command streamers to forward interrupts and vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
@@ -114,6 +118,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
 	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
 	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+
+	/*
+	 * If GuC has routed PM interrupts to itself, don't keep that routing,
+	 * but do keep any other interrupts that GuC has left unmasked.
+	 */
+	tmp = I915_READ(GEN6_PMINTRMSK);
+	if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
+		dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+		dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+	}
 }
 
 static u32 get_gttype(struct drm_i915_private *dev_priv)
@@ -281,6 +295,17 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
+static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+	u32 wopcm_size = GUC_WOPCM_TOP;
+
+	/* On BXT, the top of WOPCM is reserved for RC6 context */
+	if (IS_BROXTON(dev_priv))
+		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+	return wopcm_size;
+}
+
 /*
  * Load the GuC firmware blob into the MinuteIA.
  */
@@ -308,7 +333,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
 	/* init WOPCM */
-	I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
+	I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
 	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
 
 	/* Enable MIA caching. GuC clock gating is disabled. */
@@ -372,65 +397,58 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
 }
 
 /**
- * intel_guc_ucode_load() - load GuC uCode into the device
+ * intel_guc_setup() - finish preparing the GuC for activity
  * @dev: drm device
  *
  * Called from gem_init_hw() during driver loading and also after a GPU reset.
  *
+ * The main action required here is to load the GuC uCode into the device.
  * The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_ucode_init(), so here we need only check that
- * is succeeded, and then transfer the image to the h/w.
+ * earlier call to intel_guc_init(), so here we need only check that worked,
+ * and then transfer the image to the h/w.
  *
  * Return: non-zero code on error
  */
-int intel_guc_ucode_load(struct drm_device *dev)
+int intel_guc_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-	int retries, err = 0;
+	const char *fw_path = guc_fw->guc_fw_path;
+	int retries, ret, err;
 
-	if (!i915.enable_guc_submission)
-		return 0;
-
-	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+	DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
+			 fw_path,
 			 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
 			 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
 
-	direct_interrupts_to_host(dev_priv);
-
-	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
-		return 0;
-
-	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
-	    guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
-		return -ENOEXEC;
-
-	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
-
-	DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
-			 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
-
-	switch (guc_fw->guc_fw_fetch_status) {
-	case GUC_FIRMWARE_FAIL:
-		/* something went wrong :( */
-		err = -EIO;
-		goto fail;
-
-	case GUC_FIRMWARE_NONE:
-	case GUC_FIRMWARE_PENDING:
-	default:
-		/* "can't happen" */
-		WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
-			guc_fw->guc_fw_path,
-			intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
-			guc_fw->guc_fw_fetch_status);
-		err = -ENXIO;
-		goto fail;
-
-	case GUC_FIRMWARE_SUCCESS:
-		break;
+	/* Loading forbidden, or no firmware to load? */
+	if (!i915.enable_guc_loading) {
+		err = 0;
+		goto fail;
+	} else if (fw_path == NULL || *fw_path == '\0') {
+		if (*fw_path == '\0')
+			DRM_INFO("No GuC firmware known for this platform\n");
+		err = -ENODEV;
+		goto fail;
 	}
 
+	/* Fetch failed, or already fetched but failed to load? */
+	if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
+		err = -EIO;
+		goto fail;
+	} else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
+		err = -ENOEXEC;
+		goto fail;
+	}
+
+	direct_interrupts_to_host(dev_priv);
+
+	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+
+	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+			 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+			 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+
 	err = i915_guc_submission_init(dev);
 	if (err)
 		goto fail;
@@ -448,7 +466,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
 	 */
 	err = i915_reset_guc(dev_priv);
 	if (err) {
-		DRM_ERROR("GuC reset failed, err %d\n", err);
+		DRM_ERROR("GuC reset failed: %d\n", err);
 		goto fail;
 	}
 
@@ -459,8 +477,8 @@ int intel_guc_ucode_load(struct drm_device *dev)
 		if (--retries == 0)
 			goto fail;
 
-		DRM_INFO("GuC fw load failed, err %d; will reset and "
+		DRM_INFO("GuC fw load failed: %d; will reset and "
 			 "retry %d more time(s)\n", err, retries);
 	}
 
 	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -482,7 +500,6 @@ int intel_guc_ucode_load(struct drm_device *dev)
 	return 0;
 
 fail:
-	DRM_ERROR("GuC firmware load failed, err %d\n", err);
 	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
 		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
 
@@ -490,7 +507,41 @@ fail:
 	i915_guc_submission_disable(dev);
 	i915_guc_submission_fini(dev);
 
-	return err;
+	/*
+	 * We've failed to load the firmware :(
+	 *
+	 * Decide whether to disable GuC submission and fall back to
+	 * execlist mode, and whether to hide the error by returning
+	 * zero or to return -EIO, which the caller will treat as a
+	 * nonfatal error (i.e. it doesn't prevent driver load, but
+	 * marks the GPU as wedged until reset).
+	 */
+	if (i915.enable_guc_loading > 1) {
+		ret = -EIO;
+	} else if (i915.enable_guc_submission > 1) {
+		ret = -EIO;
+	} else {
+		ret = 0;
+	}
+
+	if (err == 0)
+		DRM_INFO("GuC firmware load skipped\n");
+	else if (ret == -EIO)
+		DRM_ERROR("GuC firmware load failed: %d\n", err);
+	else
+		DRM_INFO("GuC firmware load failed: %d\n", err);
+
+	if (i915.enable_guc_submission) {
+		if (fw_path == NULL)
+			DRM_INFO("GuC submission without firmware not supported\n");
+		if (ret == 0)
+			DRM_INFO("Falling back to execlist mode\n");
+		else
+			DRM_ERROR("GuC init failed: %d\n", ret);
+	}
+	i915.enable_guc_submission = 0;
+
+	return ret;
 }
 
 static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
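
The fail path above is what gives the two module parameters their three-way
meaning: a negative value requests the platform default (resolved in
intel_guc_init() below), 0/1 make firmware trouble nonfatal with an execlist
fallback, and 2 escalates it to -EIO. A condensed restatement of the decision
(a sketch mirroring the code above, not a separate implementation):

	/* Returns the errno intel_guc_setup() reports after a load failure. */
	static int guc_failure_policy(int enable_guc_loading,
				      int enable_guc_submission)
	{
		if (enable_guc_loading > 1 || enable_guc_submission > 1)
			return -EIO;	/* "required": wedge rather than fall back */

		return 0;		/* best effort: log and fall back to execlists */
	}
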
@@ -552,9 +603,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
 	/* Header and uCode will be loaded to WOPCM. Size of the two. */
 	size = guc_fw->header_size + guc_fw->ucode_size;
-
-	/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
-	if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
+	if (size > guc_wopcm_size(dev->dev_private)) {
 		DRM_ERROR("Firmware is too large to fit in WOPCM\n");
 		goto fail;
 	}
@@ -617,22 +666,25 @@ fail:
 }
 
 /**
- * intel_guc_ucode_init() - define parameters and fetch firmware
+ * intel_guc_init() - define parameters and fetch firmware
  * @dev: drm device
  *
  * Called early during driver load, but after GEM is initialised.
  *
  * The firmware will be transferred to the GuC's memory later,
- * when intel_guc_ucode_load() is called.
+ * when intel_guc_setup() is called.
  */
-void intel_guc_ucode_init(struct drm_device *dev)
+void intel_guc_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	const char *fw_path;
 
-	if (!HAS_GUC_SCHED(dev))
-		i915.enable_guc_submission = false;
+	/* A negative value means "use platform default" */
+	if (i915.enable_guc_loading < 0)
+		i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+	if (i915.enable_guc_submission < 0)
+		i915.enable_guc_submission = HAS_GUC_SCHED(dev);
 
 	if (!HAS_GUC_UCODE(dev)) {
 		fw_path = NULL;
@@ -640,27 +692,26 @@ void intel_guc_ucode_init(struct drm_device *dev)
 		fw_path = I915_SKL_GUC_UCODE;
 		guc_fw->guc_fw_major_wanted = 6;
 		guc_fw->guc_fw_minor_wanted = 1;
+	} else if (IS_BROXTON(dev)) {
+		fw_path = I915_BXT_GUC_UCODE;
+		guc_fw->guc_fw_major_wanted = 8;
+		guc_fw->guc_fw_minor_wanted = 7;
 	} else {
-		i915.enable_guc_submission = false;
 		fw_path = "";	/* unknown device */
 	}
 
-	if (!i915.enable_guc_submission)
-		return;
-
 	guc_fw->guc_dev = dev;
 	guc_fw->guc_fw_path = fw_path;
 	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
 	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
 
+	/* Early (and silent) return if GuC loading is disabled */
+	if (!i915.enable_guc_loading)
+		return;
 	if (fw_path == NULL)
 		return;
-
-	if (*fw_path == '\0') {
-		DRM_ERROR("No GuC firmware known for this platform\n");
-		guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+	if (*fw_path == '\0')
 		return;
-	}
 
 	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
 	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
@@ -669,10 +720,10 @@ void intel_guc_ucode_init(struct drm_device *dev)
669} 720}
670 721
671/** 722/**
672 * intel_guc_ucode_fini() - clean up all allocated resources 723 * intel_guc_fini() - clean up all allocated resources
673 * @dev: drm device 724 * @dev: drm device
674 */ 725 */
675void intel_guc_ucode_fini(struct drm_device *dev) 726void intel_guc_fini(struct drm_device *dev)
676{ 727{
677 struct drm_i915_private *dev_priv = dev->dev_private; 728 struct drm_i915_private *dev_priv = dev->dev_private;
678 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 729 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
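The behavioural change in this file is that intel_guc_init() now resolves the two tri-state module parameters up front instead of silently forcing submission off when firmware is unavailable. For reference, a minimal standalone sketch of that sanitization rule, assuming the "-1 = auto, 0 = off, 1 = on" convention from the hunk; sanitize_guc_param() and its flag argument are stand-ins, not i915 symbols:

#include <stdbool.h>
#include <stdio.h>

/* -1 selects the platform default, mirroring the "A negative value
 * means use platform default" comment in the hunk above. */
static int sanitize_guc_param(int param, bool platform_supports)
{
	if (param < 0)
		return platform_supports ? 1 : 0;
	return param;
}

int main(void)
{
	/* e.g. a platform with GuC firmware, submission forced off */
	int loading = sanitize_guc_param(-1, true);
	int submission = sanitize_guc_param(0, true);

	printf("loading=%d submission=%d\n", loading, submission);
	return 0;
}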
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2c3bd9c2573e..6b29da9bba38 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1678,35 +1678,12 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1678 struct intel_crtc *intel_crtc = 1678 struct intel_crtc *intel_crtc =
1679 to_intel_crtc(encoder->base.crtc); 1679 to_intel_crtc(encoder->base.crtc);
1680 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1680 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1681 enum dpio_channel port = vlv_dport_to_channel(dport);
1682 int pipe = intel_crtc->pipe;
1683 u32 val;
1684 1681
1685 /* Enable clock channels for this port */ 1682 vlv_phy_pre_encoder_enable(encoder);
1686 mutex_lock(&dev_priv->sb_lock);
1687 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
1688 val = 0;
1689 if (pipe)
1690 val |= (1<<21);
1691 else
1692 val &= ~(1<<21);
1693 val |= 0x001000c4;
1694 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
1695 1683
1696 /* HDMI 1.0V-2dB */ 1684 /* HDMI 1.0V-2dB */
1697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); 1685 vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
1698 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); 1686 0x2b247878);
1699 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
1700 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
1701 vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
1702 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
1703 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
1704 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1705
1706 /* Program lane clock */
1707 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
1708 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
1709 mutex_unlock(&dev_priv->sb_lock);
1710 1687
1711 intel_hdmi->set_infoframes(&encoder->base, 1688 intel_hdmi->set_infoframes(&encoder->base,
1712 intel_crtc->config->has_hdmi_sink, 1689 intel_crtc->config->has_hdmi_sink,
@@ -1719,207 +1696,27 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1719 1696
1720static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1697static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1721{ 1698{
1722 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1723 struct drm_device *dev = encoder->base.dev;
1724 struct drm_i915_private *dev_priv = dev->dev_private;
1725 struct intel_crtc *intel_crtc =
1726 to_intel_crtc(encoder->base.crtc);
1727 enum dpio_channel port = vlv_dport_to_channel(dport);
1728 int pipe = intel_crtc->pipe;
1729
1730 intel_hdmi_prepare(encoder); 1699 intel_hdmi_prepare(encoder);
1731 1700
1732 /* Program Tx lane resets to default */ 1701 vlv_phy_pre_pll_enable(encoder);
1733 mutex_lock(&dev_priv->sb_lock);
1734 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
1735 DPIO_PCS_TX_LANE2_RESET |
1736 DPIO_PCS_TX_LANE1_RESET);
1737 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
1738 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1739 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1740 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1741 DPIO_PCS_CLK_SOFT_RESET);
1742
1743 /* Fix up inter-pair skew failure */
1744 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
1745 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
1746 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
1747
1748 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
1749 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1750 mutex_unlock(&dev_priv->sb_lock);
1751}
1752
1753static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
1754 bool reset)
1755{
1756 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1757 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1758 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1759 enum pipe pipe = crtc->pipe;
1760 uint32_t val;
1761
1762 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1763 if (reset)
1764 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1765 else
1766 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1767 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1768
1769 if (crtc->config->lane_count > 2) {
1770 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1771 if (reset)
1772 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1773 else
1774 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1775 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1776 }
1777
1778 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1779 val |= CHV_PCS_REQ_SOFTRESET_EN;
1780 if (reset)
1781 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1782 else
1783 val |= DPIO_PCS_CLK_SOFT_RESET;
1784 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1785
1786 if (crtc->config->lane_count > 2) {
1787 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1788 val |= CHV_PCS_REQ_SOFTRESET_EN;
1789 if (reset)
1790 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1791 else
1792 val |= DPIO_PCS_CLK_SOFT_RESET;
1793 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1794 }
1795} 1702}
1796 1703
1797static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1704static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1798{ 1705{
1799 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1800 struct drm_device *dev = encoder->base.dev;
1801 struct drm_i915_private *dev_priv = dev->dev_private;
1802 struct intel_crtc *intel_crtc =
1803 to_intel_crtc(encoder->base.crtc);
1804 enum dpio_channel ch = vlv_dport_to_channel(dport);
1805 enum pipe pipe = intel_crtc->pipe;
1806 u32 val;
1807
1808 intel_hdmi_prepare(encoder); 1706 intel_hdmi_prepare(encoder);
1809 1707
1810 /* 1708 chv_phy_pre_pll_enable(encoder);
1811 * Must trick the second common lane into life.
1812 * Otherwise we can't even access the PLL.
1813 */
1814 if (ch == DPIO_CH0 && pipe == PIPE_B)
1815 dport->release_cl2_override =
1816 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
1817
1818 chv_phy_powergate_lanes(encoder, true, 0x0);
1819
1820 mutex_lock(&dev_priv->sb_lock);
1821
1822 /* Assert data lane reset */
1823 chv_data_lane_soft_reset(encoder, true);
1824
1825 /* program left/right clock distribution */
1826 if (pipe != PIPE_B) {
1827 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1828 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1829 if (ch == DPIO_CH0)
1830 val |= CHV_BUFLEFTENA1_FORCE;
1831 if (ch == DPIO_CH1)
1832 val |= CHV_BUFRIGHTENA1_FORCE;
1833 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1834 } else {
1835 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1836 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1837 if (ch == DPIO_CH0)
1838 val |= CHV_BUFLEFTENA2_FORCE;
1839 if (ch == DPIO_CH1)
1840 val |= CHV_BUFRIGHTENA2_FORCE;
1841 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1842 }
1843
1844 /* program clock channel usage */
1845 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
1846 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1847 if (pipe != PIPE_B)
1848 val &= ~CHV_PCS_USEDCLKCHANNEL;
1849 else
1850 val |= CHV_PCS_USEDCLKCHANNEL;
1851 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
1852
1853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
1854 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1855 if (pipe != PIPE_B)
1856 val &= ~CHV_PCS_USEDCLKCHANNEL;
1857 else
1858 val |= CHV_PCS_USEDCLKCHANNEL;
1859 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
1860
1861 /*
1862 * This is a bit weird since generally CL
1863 * matches the pipe, but here we need to
1864 * pick the CL based on the port.
1865 */
1866 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
1867 if (pipe != PIPE_B)
1868 val &= ~CHV_CMN_USEDCLKCHANNEL;
1869 else
1870 val |= CHV_CMN_USEDCLKCHANNEL;
1871 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
1872
1873 mutex_unlock(&dev_priv->sb_lock);
1874} 1709}
1875 1710
1876static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder) 1711static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
1877{ 1712{
1878 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1713 chv_phy_post_pll_disable(encoder);
1879 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
1880 u32 val;
1881
1882 mutex_lock(&dev_priv->sb_lock);
1883
1884 /* disable left/right clock distribution */
1885 if (pipe != PIPE_B) {
1886 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1887 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1888 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1889 } else {
1890 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1891 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1892 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1893 }
1894
1895 mutex_unlock(&dev_priv->sb_lock);
1896
1897 /*
1898 * Leave the power down bit cleared for at least one
1899 * lane so that chv_powergate_phy_ch() will power
1900 * on something when the channel is otherwise unused.
1901 * When the port is off and the override is removed
1902 * the lanes power down anyway, so otherwise it doesn't
1903 * really matter what the state of power down bits is
1904 * after this.
1905 */
1906 chv_phy_powergate_lanes(encoder, false, 0x0);
1907} 1714}
1908 1715
1909static void vlv_hdmi_post_disable(struct intel_encoder *encoder) 1716static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1910{ 1717{
1911 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1912 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1913 struct intel_crtc *intel_crtc =
1914 to_intel_crtc(encoder->base.crtc);
1915 enum dpio_channel port = vlv_dport_to_channel(dport);
1916 int pipe = intel_crtc->pipe;
1917
1918 /* Reset lanes to avoid HDMI flicker (VLV w/a) */ 1718 /* Reset lanes to avoid HDMI flicker (VLV w/a) */
1919 mutex_lock(&dev_priv->sb_lock); 1719 vlv_phy_reset_lanes(encoder);
1920 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
1921 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
1922 mutex_unlock(&dev_priv->sb_lock);
1923} 1720}
1924 1721
1925static void chv_hdmi_post_disable(struct intel_encoder *encoder) 1722static void chv_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1944,138 +1741,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1944 struct intel_crtc *intel_crtc = 1741 struct intel_crtc *intel_crtc =
1945 to_intel_crtc(encoder->base.crtc); 1742 to_intel_crtc(encoder->base.crtc);
1946 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1743 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1947 enum dpio_channel ch = vlv_dport_to_channel(dport);
1948 int pipe = intel_crtc->pipe;
1949 int data, i, stagger;
1950 u32 val;
1951 1744
1952 mutex_lock(&dev_priv->sb_lock); 1745 chv_phy_pre_encoder_enable(encoder);
1953
1954 /* allow hardware to manage TX FIFO reset source */
1955 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1956 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1957 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1958
1959 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1960 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1961 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1962
1963 /* Program Tx latency optimal setting */
1964 for (i = 0; i < 4; i++) {
1965 /* Set the upar bit */
1966 data = (i == 1) ? 0x0 : 0x1;
1967 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
1968 data << DPIO_UPAR_SHIFT);
1969 }
1970
1971 /* Data lane stagger programming */
1972 if (intel_crtc->config->port_clock > 270000)
1973 stagger = 0x18;
1974 else if (intel_crtc->config->port_clock > 135000)
1975 stagger = 0xd;
1976 else if (intel_crtc->config->port_clock > 67500)
1977 stagger = 0x7;
1978 else if (intel_crtc->config->port_clock > 33750)
1979 stagger = 0x4;
1980 else
1981 stagger = 0x2;
1982
1983 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1984 val |= DPIO_TX2_STAGGER_MASK(0x1f);
1985 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1986
1987 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1988 val |= DPIO_TX2_STAGGER_MASK(0x1f);
1989 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1990
1991 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
1992 DPIO_LANESTAGGER_STRAP(stagger) |
1993 DPIO_LANESTAGGER_STRAP_OVRD |
1994 DPIO_TX1_STAGGER_MASK(0x1f) |
1995 DPIO_TX1_STAGGER_MULT(6) |
1996 DPIO_TX2_STAGGER_MULT(0));
1997
1998 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
1999 DPIO_LANESTAGGER_STRAP(stagger) |
2000 DPIO_LANESTAGGER_STRAP_OVRD |
2001 DPIO_TX1_STAGGER_MASK(0x1f) |
2002 DPIO_TX1_STAGGER_MULT(7) |
2003 DPIO_TX2_STAGGER_MULT(5));
2004
2005 /* Deassert data lane reset */
2006 chv_data_lane_soft_reset(encoder, false);
2007
2008 /* Clear calc init */
2009 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2010 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2011 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2012 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
2013 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2014
2015 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2016 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2017 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2018 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
2019 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2020
2021 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
2022 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2023 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2024 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
2025
2026 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
2027 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2028 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2029 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
2030 1746
2031 /* FIXME: Program the support xxx V-dB */ 1747 /* FIXME: Program the support xxx V-dB */
2032 /* Use 800mV-0dB */ 1748 /* Use 800mV-0dB */
2033 for (i = 0; i < 4; i++) { 1749 chv_set_phy_signal_level(encoder, 128, 102, false);
2034 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2035 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
2036 val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
2037 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
2038 }
2039
2040 for (i = 0; i < 4; i++) {
2041 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2042
2043 val &= ~DPIO_SWING_MARGIN000_MASK;
2044 val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
2045
2046 /*
2047 * Supposedly this value shouldn't matter when unique transition
2048 * scale is disabled, but in fact it does matter. Let's just
2049 * always program the same value and hope it's OK.
2050 */
2051 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2052 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
2053
2054 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2055 }
2056
2057 /*
2058 * The document said it needs to set bit 27 for ch0 and bit 26
2059 * for ch1. Might be a typo in the doc.
2060 * For now, for this unique transition scale selection, set bit
2061 * 27 for ch0 and ch1.
2062 */
2063 for (i = 0; i < 4; i++) {
2064 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2065 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
2066 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2067 }
2068
2069 /* Start swing calculation */
2070 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2071 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2072 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2073
2074 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2075 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2076 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2077
2078 mutex_unlock(&dev_priv->sb_lock);
2079 1750
2080 intel_hdmi->set_infoframes(&encoder->base, 1751 intel_hdmi->set_infoframes(&encoder->base,
2081 intel_crtc->config->has_hdmi_sink, 1752 intel_crtc->config->has_hdmi_sink,
@@ -2086,10 +1757,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
2086 vlv_wait_port_ready(dev_priv, dport, 0x0); 1757 vlv_wait_port_ready(dev_priv, dport, 0x0);
2087 1758
2088 /* Second common lane will stay alive on its own now */ 1759 /* Second common lane will stay alive on its own now */
2089 if (dport->release_cl2_override) { 1760 chv_phy_release_cl2_override(encoder);
2090 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2091 dport->release_cl2_override = false;
2092 }
2093} 1761}
2094 1762
2095static void intel_hdmi_destroy(struct drm_connector *connector) 1763static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -2114,7 +1782,6 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
2114static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { 1782static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
2115 .get_modes = intel_hdmi_get_modes, 1783 .get_modes = intel_hdmi_get_modes,
2116 .mode_valid = intel_hdmi_mode_valid, 1784 .mode_valid = intel_hdmi_mode_valid,
2117 .best_encoder = intel_best_encoder,
2118}; 1785};
2119 1786
2120static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 1787static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -2277,7 +1944,7 @@ void intel_hdmi_init(struct drm_device *dev,
2277 intel_encoder = &intel_dig_port->base; 1944 intel_encoder = &intel_dig_port->base;
2278 1945
2279 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, 1946 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
2280 DRM_MODE_ENCODER_TMDS, NULL); 1947 DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
2281 1948
2282 intel_encoder->compute_config = intel_hdmi_compute_config; 1949 intel_encoder->compute_config = intel_hdmi_compute_config;
2283 if (HAS_PCH_SPLIT(dev)) { 1950 if (HAS_PCH_SPLIT(dev)) {
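The deletions in this file (and the matching ones on the DP side) fold long open-coded vlv_dpio_read()/vlv_dpio_write() sequences into shared helpers such as vlv_phy_pre_encoder_enable() and chv_phy_pre_pll_enable(). A sketch of the extraction pattern, with illustrative names rather than the i915 ones; the point is that the helper owns the sideband lock, so no caller can forget the mutex_lock()/mutex_unlock() pair:

#include <pthread.h>
#include <stdint.h>

struct phy {
	pthread_mutex_t sb_lock;	/* serializes sideband access */
	uint32_t regs[64];
};

/* One call replaces a copy-pasted read/modify/write block per
 * encoder; the locking lives in exactly one place. */
static void phy_rmw(struct phy *phy, unsigned int reg,
		    uint32_t clear, uint32_t set)
{
	pthread_mutex_lock(&phy->sb_lock);
	phy->regs[reg] = (phy->regs[reg] & ~clear) | set;
	pthread_mutex_unlock(&phy->sb_lock);
}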
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index bee673005d48..38eeca7a6e72 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
220 } 220 }
221 } 221 }
222 if (dev_priv->display.hpd_irq_setup) 222 if (dev_priv->display.hpd_irq_setup)
223 dev_priv->display.hpd_irq_setup(dev); 223 dev_priv->display.hpd_irq_setup(dev_priv);
224 spin_unlock_irq(&dev_priv->irq_lock); 224 spin_unlock_irq(&dev_priv->irq_lock);
225 225
226 intel_runtime_pm_put(dev_priv); 226 intel_runtime_pm_put(dev_priv);
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
346 346
347/** 347/**
348 * intel_hpd_irq_handler - main hotplug irq handler 348 * intel_hpd_irq_handler - main hotplug irq handler
349 * @dev: drm device 349 * @dev_priv: drm_i915_private
350 * @pin_mask: a mask of hpd pins that have triggered the irq 350 * @pin_mask: a mask of hpd pins that have triggered the irq
351 * @long_mask: a mask of hpd pins that may be long hpd pulses 351 * @long_mask: a mask of hpd pins that may be long hpd pulses
352 * 352 *
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
360 * Here, we do hotplug irq storm detection and mitigation, and pass further 360 * Here, we do hotplug irq storm detection and mitigation, and pass further
361 * processing to appropriate bottom halves. 361 * processing to appropriate bottom halves.
362 */ 362 */
363void intel_hpd_irq_handler(struct drm_device *dev, 363void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
364 u32 pin_mask, u32 long_mask) 364 u32 pin_mask, u32 long_mask)
365{ 365{
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 int i; 366 int i;
368 enum port port; 367 enum port port;
369 bool storm_detected = false; 368 bool storm_detected = false;
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
407 * hotplug bits itself. So only WARN about unexpected 406 * hotplug bits itself. So only WARN about unexpected
408 * interrupts on saner platforms. 407 * interrupts on saner platforms.
409 */ 408 */
410 WARN_ONCE(!HAS_GMCH_DISPLAY(dev), 409 WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
411 "Received HPD interrupt on pin %d although disabled\n", i); 410 "Received HPD interrupt on pin %d although disabled\n", i);
412 continue; 411 continue;
413 } 412 }
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
427 } 426 }
428 427
429 if (storm_detected) 428 if (storm_detected)
430 dev_priv->display.hpd_irq_setup(dev); 429 dev_priv->display.hpd_irq_setup(dev_priv);
431 spin_unlock(&dev_priv->irq_lock); 430 spin_unlock(&dev_priv->irq_lock);
432 431
433 /* 432 /*
@@ -485,7 +484,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
485 */ 484 */
486 spin_lock_irq(&dev_priv->irq_lock); 485 spin_lock_irq(&dev_priv->irq_lock);
487 if (dev_priv->display.hpd_irq_setup) 486 if (dev_priv->display.hpd_irq_setup)
488 dev_priv->display.hpd_irq_setup(dev); 487 dev_priv->display.hpd_irq_setup(dev_priv);
489 spin_unlock_irq(&dev_priv->irq_lock); 488 spin_unlock_irq(&dev_priv->irq_lock);
490} 489}
491 490
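This file continues the dev-to-dev_priv conversion: intel_hpd_irq_handler() and the hpd_irq_setup() vfunc now take struct drm_i915_private directly, so callers stop re-deriving it from drm_device on every call. The shape of the change, sketched with stub types (names illustrative):

struct drm_i915_private;		/* opaque here */

struct drm_device {
	void *dev_private;		/* points at the drm_i915_private */
};

/* Before: each callee took drm_device and immediately hopped
 * through dev->dev_private. After: the caller passes dev_priv
 * once, and the extra local plus cast disappear. */
static void hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	(void)dev_priv;			/* platform-specific setup elided */
}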
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 42eac37de047..5c191a1afaaf 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -224,10 +224,16 @@ enum {
224 FAULT_AND_CONTINUE /* Unsupported */ 224 FAULT_AND_CONTINUE /* Unsupported */
225}; 225};
226#define GEN8_CTX_ID_SHIFT 32 226#define GEN8_CTX_ID_SHIFT 32
227#define GEN8_CTX_ID_WIDTH 21
227#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 228#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
228#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 229#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
229 230
230static int intel_lr_context_pin(struct intel_context *ctx, 231/* Typical size of the average request (2 pipecontrols and a MI_BB) */
232#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
233
234static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
235 struct intel_engine_cs *engine);
236static int intel_lr_context_pin(struct i915_gem_context *ctx,
231 struct intel_engine_cs *engine); 237 struct intel_engine_cs *engine);
232 238
233/** 239/**
@@ -240,23 +246,22 @@ static int intel_lr_context_pin(struct intel_context *ctx,
240 * 246 *
241 * Return: 1 if Execlists is supported and has to be enabled. 247 * Return: 1 if Execlists is supported and has to be enabled.
242 */ 248 */
243int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) 249int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
244{ 250{
245 WARN_ON(i915.enable_ppgtt == -1);
246
247 /* On platforms with execlist available, vGPU will only 251 /* On platforms with execlist available, vGPU will only
248 * support execlist mode, no ring buffer mode. 252 * support execlist mode, no ring buffer mode.
249 */ 253 */
250 if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) 254 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
251 return 1; 255 return 1;
252 256
253 if (INTEL_INFO(dev)->gen >= 9) 257 if (INTEL_GEN(dev_priv) >= 9)
254 return 1; 258 return 1;
255 259
256 if (enable_execlists == 0) 260 if (enable_execlists == 0)
257 return 0; 261 return 0;
258 262
259 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && 263 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
264 USES_PPGTT(dev_priv) &&
260 i915.use_mmio_flip >= 0) 265 i915.use_mmio_flip >= 0)
261 return 1; 266 return 1;
262 267
@@ -266,19 +271,19 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
266static void 271static void
267logical_ring_init_platform_invariants(struct intel_engine_cs *engine) 272logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
268{ 273{
269 struct drm_device *dev = engine->dev; 274 struct drm_i915_private *dev_priv = engine->i915;
270 275
271 if (IS_GEN8(dev) || IS_GEN9(dev)) 276 if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
272 engine->idle_lite_restore_wa = ~0; 277 engine->idle_lite_restore_wa = ~0;
273 278
274 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 279 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
275 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && 280 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
276 (engine->id == VCS || engine->id == VCS2); 281 (engine->id == VCS || engine->id == VCS2);
277 282
278 engine->ctx_desc_template = GEN8_CTX_VALID; 283 engine->ctx_desc_template = GEN8_CTX_VALID;
279 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << 284 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
280 GEN8_CTX_ADDRESSING_MODE_SHIFT; 285 GEN8_CTX_ADDRESSING_MODE_SHIFT;
281 if (IS_GEN8(dev)) 286 if (IS_GEN8(dev_priv))
282 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; 287 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
283 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; 288 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
284 289
@@ -297,7 +302,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
297 * descriptor for a pinned context 302 * descriptor for a pinned context
298 * 303 *
299 * @ctx: Context to work on 304 * @ctx: Context to work on
300 * @ring: Engine the descriptor will be used with 305 * @engine: Engine the descriptor will be used with
301 * 306 *
302 * The context descriptor encodes various attributes of a context, 307 * The context descriptor encodes various attributes of a context,
303 * including its GTT address and some flags. Because it's fairly 308 * including its GTT address and some flags. Because it's fairly
@@ -305,62 +310,41 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
305 * which remains valid until the context is unpinned. 310 * which remains valid until the context is unpinned.
306 * 311 *
307 * This is what a descriptor looks like, from LSB to MSB: 312 * This is what a descriptor looks like, from LSB to MSB:
308 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) 313 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
309 * bits 12-31: LRCA, GTT address of (the HWSP of) this context 314 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
310 * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) 315 * bits 32-52: ctx ID, a globally unique tag
311 * bits 52-63: reserved, may encode the engine ID (for GuC) 316 * bits 53-54: mbz, reserved for use by hardware
317 * bits 55-63: group ID, currently unused and set to 0
312 */ 318 */
313static void 319static void
314intel_lr_context_descriptor_update(struct intel_context *ctx, 320intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
315 struct intel_engine_cs *engine) 321 struct intel_engine_cs *engine)
316{ 322{
317 uint64_t lrca, desc; 323 struct intel_context *ce = &ctx->engine[engine->id];
324 u64 desc;
318 325
319 lrca = ctx->engine[engine->id].lrc_vma->node.start + 326 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
320 LRC_PPHWSP_PN * PAGE_SIZE;
321 327
322 desc = engine->ctx_desc_template; /* bits 0-11 */ 328 desc = engine->ctx_desc_template; /* bits 0-11 */
323 desc |= lrca; /* bits 12-31 */ 329 desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
324 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ 330 /* bits 12-31 */
331 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
325 332
326 ctx->engine[engine->id].lrc_desc = desc; 333 ce->lrc_desc = desc;
327} 334}
328 335
329uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 336uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
330 struct intel_engine_cs *engine) 337 struct intel_engine_cs *engine)
331{ 338{
332 return ctx->engine[engine->id].lrc_desc; 339 return ctx->engine[engine->id].lrc_desc;
333} 340}
334 341
335/**
336 * intel_execlists_ctx_id() - get the Execlists Context ID
337 * @ctx: Context to get the ID for
338 * @ring: Engine to get the ID for
339 *
340 * Do not confuse with ctx->id! Unfortunately we have a name overload
341 * here: the old context ID we pass to userspace as a handler so that
342 * they can refer to a context, and the new context ID we pass to the
343 * ELSP so that the GPU can inform us of the context status via
344 * interrupts.
345 *
346 * The context ID is a portion of the context descriptor, so we can
347 * just extract the required part from the cached descriptor.
348 *
349 * Return: 20-bits globally unique context ID.
350 */
351u32 intel_execlists_ctx_id(struct intel_context *ctx,
352 struct intel_engine_cs *engine)
353{
354 return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
355}
356
357static void execlists_elsp_write(struct drm_i915_gem_request *rq0, 342static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
358 struct drm_i915_gem_request *rq1) 343 struct drm_i915_gem_request *rq1)
359{ 344{
360 345
361 struct intel_engine_cs *engine = rq0->engine; 346 struct intel_engine_cs *engine = rq0->engine;
362 struct drm_device *dev = engine->dev; 347 struct drm_i915_private *dev_priv = rq0->i915;
363 struct drm_i915_private *dev_priv = dev->dev_private;
364 uint64_t desc[2]; 348 uint64_t desc[2];
365 349
366 if (rq1) { 350 if (rq1) {
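With the hw_id change above, the context ID field (bits 32-52) no longer recycles the LRCA; it carries a per-context ID allocated at context creation. A runnable sketch of the packing, using the shift/width constants from the hunk (the lrca argument is assumed to already include the LRC_PPHWSP_PN * PAGE_SIZE offset, as in intel_lr_context_descriptor_update()):

#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_ID_SHIFT	32
#define GEN8_CTX_ID_WIDTH	21

static uint64_t lrc_descriptor(uint64_t template, uint64_t lrca,
			       uint32_t hw_id)
{
	uint64_t desc;

	desc  = template;		/* bits 0-11: GEN8_CTX_* flags */
	desc |= lrca;			/* bits 12-31: GTT address */
	desc |= (uint64_t)hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
	return desc;
}

int main(void)
{
	/* hw_id must fit the 21-bit field: hw_id < (1 << 21) */
	uint64_t desc = lrc_descriptor(0x23, 0x12000, 42);

	printf("desc=%#llx\n", (unsigned long long)desc);
	return 0;
}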
@@ -442,7 +426,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
442 * If irqs are not active generate a warning as batches that finish 426 * If irqs are not active generate a warning as batches that finish
443 * without the irqs may get lost and a GPU Hang may occur. 427 * without the irqs may get lost and a GPU Hang may occur.
444 */ 428 */
445 WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); 429 WARN_ON(!intel_irqs_enabled(engine->i915));
446 430
447 /* Try to read in pairs */ 431 /* Try to read in pairs */
448 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, 432 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -453,8 +437,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
453 /* Same ctx: ignore first request, as second request 437 /* Same ctx: ignore first request, as second request
454 * will update tail past first request's workload */ 438 * will update tail past first request's workload */
455 cursor->elsp_submitted = req0->elsp_submitted; 439 cursor->elsp_submitted = req0->elsp_submitted;
456 list_move_tail(&req0->execlist_link, 440 list_del(&req0->execlist_link);
457 &engine->execlist_retired_req_list); 441 i915_gem_request_unreference(req0);
458 req0 = cursor; 442 req0 = cursor;
459 } else { 443 } else {
460 req1 = cursor; 444 req1 = cursor;
@@ -486,7 +470,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
486} 470}
487 471
488static unsigned int 472static unsigned int
489execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) 473execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
490{ 474{
491 struct drm_i915_gem_request *head_req; 475 struct drm_i915_gem_request *head_req;
492 476
@@ -496,19 +480,16 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
496 struct drm_i915_gem_request, 480 struct drm_i915_gem_request,
497 execlist_link); 481 execlist_link);
498 482
499 if (!head_req) 483 if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
500 return 0; 484 return 0;
501
502 if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
503 return 0;
504 485
505 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); 486 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
506 487
507 if (--head_req->elsp_submitted > 0) 488 if (--head_req->elsp_submitted > 0)
508 return 0; 489 return 0;
509 490
510 list_move_tail(&head_req->execlist_link, 491 list_del(&head_req->execlist_link);
511 &engine->execlist_retired_req_list); 492 i915_gem_request_unreference(head_req);
512 493
513 return 1; 494 return 1;
514} 495}
@@ -517,7 +498,7 @@ static u32
517get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, 498get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
518 u32 *context_id) 499 u32 *context_id)
519{ 500{
520 struct drm_i915_private *dev_priv = engine->dev->dev_private; 501 struct drm_i915_private *dev_priv = engine->i915;
521 u32 status; 502 u32 status;
522 503
523 read_pointer %= GEN8_CSB_ENTRIES; 504 read_pointer %= GEN8_CSB_ENTRIES;
@@ -543,7 +524,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
543static void intel_lrc_irq_handler(unsigned long data) 524static void intel_lrc_irq_handler(unsigned long data)
544{ 525{
545 struct intel_engine_cs *engine = (struct intel_engine_cs *)data; 526 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
546 struct drm_i915_private *dev_priv = engine->dev->dev_private; 527 struct drm_i915_private *dev_priv = engine->i915;
547 u32 status_pointer; 528 u32 status_pointer;
548 unsigned int read_pointer, write_pointer; 529 unsigned int read_pointer, write_pointer;
549 u32 csb[GEN8_CSB_ENTRIES][2]; 530 u32 csb[GEN8_CSB_ENTRIES][2];
@@ -612,11 +593,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
612 struct drm_i915_gem_request *cursor; 593 struct drm_i915_gem_request *cursor;
613 int num_elements = 0; 594 int num_elements = 0;
614 595
615 if (request->ctx != request->i915->kernel_context)
616 intel_lr_context_pin(request->ctx, engine);
617
618 i915_gem_request_reference(request);
619
620 spin_lock_bh(&engine->execlist_lock); 596 spin_lock_bh(&engine->execlist_lock);
621 597
622 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) 598 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
@@ -633,12 +609,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
633 if (request->ctx == tail_req->ctx) { 609 if (request->ctx == tail_req->ctx) {
634 WARN(tail_req->elsp_submitted != 0, 610 WARN(tail_req->elsp_submitted != 0,
635 "More than 2 already-submitted reqs queued\n"); 611 "More than 2 already-submitted reqs queued\n");
636 list_move_tail(&tail_req->execlist_link, 612 list_del(&tail_req->execlist_link);
637 &engine->execlist_retired_req_list); 613 i915_gem_request_unreference(tail_req);
638 } 614 }
639 } 615 }
640 616
617 i915_gem_request_reference(request);
641 list_add_tail(&request->execlist_link, &engine->execlist_queue); 618 list_add_tail(&request->execlist_link, &engine->execlist_queue);
619 request->ctx_hw_id = request->ctx->hw_id;
642 if (num_elements == 0) 620 if (num_elements == 0)
643 execlists_context_unqueue(engine); 621 execlists_context_unqueue(engine);
644 622
@@ -698,9 +676,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
698 676
699int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) 677int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
700{ 678{
701 int ret = 0; 679 struct intel_engine_cs *engine = request->engine;
680 struct intel_context *ce = &request->ctx->engine[engine->id];
681 int ret;
682
683 /* Flush enough space to reduce the likelihood of waiting after
684 * we start building the request - in which case we will just
685 * have to repeat work.
686 */
687 request->reserved_space += EXECLISTS_REQUEST_SIZE;
702 688
703 request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; 689 if (!ce->state) {
690 ret = execlists_context_deferred_alloc(request->ctx, engine);
691 if (ret)
692 return ret;
693 }
694
695 request->ringbuf = ce->ringbuf;
704 696
705 if (i915.enable_guc_submission) { 697 if (i915.enable_guc_submission) {
706 /* 698 /*
@@ -708,16 +700,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
708 * going any further, as the i915_add_request() call 700 * going any further, as the i915_add_request() call
709 * later on mustn't fail ... 701 * later on mustn't fail ...
710 */ 702 */
711 struct intel_guc *guc = &request->i915->guc; 703 ret = i915_guc_wq_check_space(request);
712
713 ret = i915_guc_wq_check_space(guc->execbuf_client);
714 if (ret) 704 if (ret)
715 return ret; 705 return ret;
716 } 706 }
717 707
718 if (request->ctx != request->i915->kernel_context) 708 ret = intel_lr_context_pin(request->ctx, engine);
719 ret = intel_lr_context_pin(request->ctx, request->engine); 709 if (ret)
710 return ret;
711
712 ret = intel_ring_begin(request, 0);
713 if (ret)
714 goto err_unpin;
715
716 if (!ce->initialised) {
717 ret = engine->init_context(request);
718 if (ret)
719 goto err_unpin;
720
721 ce->initialised = true;
722 }
723
724 /* Note that after this point, we have committed to using
725 * this request as it is being used to both track the
726 * state of engine initialisation and liveness of the
727 * golden renderstate above. Think twice before you try
728 * to cancel/unwind this request now.
729 */
730
731 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
732 return 0;
720 733
734err_unpin:
735 intel_lr_context_unpin(request->ctx, engine);
721 return ret; 736 return ret;
722} 737}
723 738
@@ -734,7 +749,6 @@ static int
734intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) 749intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
735{ 750{
736 struct intel_ringbuffer *ringbuf = request->ringbuf; 751 struct intel_ringbuffer *ringbuf = request->ringbuf;
737 struct drm_i915_private *dev_priv = request->i915;
738 struct intel_engine_cs *engine = request->engine; 752 struct intel_engine_cs *engine = request->engine;
739 753
740 intel_logical_ring_advance(ringbuf); 754 intel_logical_ring_advance(ringbuf);
@@ -753,40 +767,23 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
753 if (intel_engine_stopped(engine)) 767 if (intel_engine_stopped(engine))
754 return 0; 768 return 0;
755 769
756 if (engine->last_context != request->ctx) { 770 /* We keep the previous context alive until we retire the following
757 if (engine->last_context) 771 * request. This ensures that the context object is still pinned
758 intel_lr_context_unpin(engine->last_context, engine); 772 * for any residual writes the HW makes into it on the context switch
759 if (request->ctx != request->i915->kernel_context) { 773 * into the next object following the breadcrumb. Otherwise, we may
760 intel_lr_context_pin(request->ctx, engine); 774 * retire the context too early.
761 engine->last_context = request->ctx; 775 */
762 } else { 776 request->previous_context = engine->last_context;
763 engine->last_context = NULL; 777 engine->last_context = request->ctx;
764 }
765 }
766 778
767 if (dev_priv->guc.execbuf_client) 779 if (i915.enable_guc_submission)
768 i915_guc_submit(dev_priv->guc.execbuf_client, request); 780 i915_guc_submit(request);
769 else 781 else
770 execlists_context_queue(request); 782 execlists_context_queue(request);
771 783
772 return 0; 784 return 0;
773} 785}
774 786
775int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
776{
777 /*
778 * The first call merely notes the reserve request and is common for
779 * all back ends. The subsequent localised _begin() call actually
780 * ensures that the reservation is available. Without the begin, if
781 * the request creator immediately submitted the request without
782 * adding any commands to it then there might not actually be
783 * sufficient room for the submission commands.
784 */
785 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
786
787 return intel_ring_begin(request, 0);
788}
789
790/** 787/**
791 * execlists_submission() - submit a batchbuffer for execution, Execlists style 788 * execlists_submission() - submit a batchbuffer for execution, Execlists style
792 * @dev: DRM device. 789 * @dev: DRM device.
@@ -881,28 +878,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
881 return 0; 878 return 0;
882} 879}
883 880
884void intel_execlists_retire_requests(struct intel_engine_cs *engine) 881void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
885{ 882{
886 struct drm_i915_gem_request *req, *tmp; 883 struct drm_i915_gem_request *req, *tmp;
887 struct list_head retired_list; 884 LIST_HEAD(cancel_list);
888 885
889 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 886 WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
890 if (list_empty(&engine->execlist_retired_req_list))
891 return;
892 887
893 INIT_LIST_HEAD(&retired_list);
894 spin_lock_bh(&engine->execlist_lock); 888 spin_lock_bh(&engine->execlist_lock);
895 list_replace_init(&engine->execlist_retired_req_list, &retired_list); 889 list_replace_init(&engine->execlist_queue, &cancel_list);
896 spin_unlock_bh(&engine->execlist_lock); 890 spin_unlock_bh(&engine->execlist_lock);
897 891
898 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { 892 list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
899 struct intel_context *ctx = req->ctx;
900 struct drm_i915_gem_object *ctx_obj =
901 ctx->engine[engine->id].state;
902
903 if (ctx_obj && (ctx != req->i915->kernel_context))
904 intel_lr_context_unpin(ctx, engine);
905
906 list_del(&req->execlist_link); 893 list_del(&req->execlist_link);
907 i915_gem_request_unreference(req); 894 i915_gem_request_unreference(req);
908 } 895 }
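intel_execlists_cancel_requests() replaces the old deferred execlist_retired_req_list: the whole queue is detached in O(1) under execlist_lock via list_replace_init(), and the request references are dropped with the lock released. The idiom, sketched with a singly linked list and a pthread mutex standing in for the bh-disabling spinlock:

#include <pthread.h>
#include <stdlib.h>

struct req {
	struct req *next;
	int refcount;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *queue_head;

static void req_unreference(struct req *rq)
{
	if (--rq->refcount == 0)
		free(rq);
}

static void cancel_requests(void)
{
	struct req *cancel, *next;

	/* Detach everything while holding the lock... */
	pthread_mutex_lock(&queue_lock);
	cancel = queue_head;
	queue_head = NULL;	/* the list_replace_init() equivalent */
	pthread_mutex_unlock(&queue_lock);

	/* ...then do the potentially slow teardown unlocked. */
	for (; cancel; cancel = next) {
		next = cancel->next;
		req_unreference(cancel);
	}
}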
@@ -910,7 +897,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
910 897
911void intel_logical_ring_stop(struct intel_engine_cs *engine) 898void intel_logical_ring_stop(struct intel_engine_cs *engine)
912{ 899{
913 struct drm_i915_private *dev_priv = engine->dev->dev_private; 900 struct drm_i915_private *dev_priv = engine->i915;
914 int ret; 901 int ret;
915 902
916 if (!intel_engine_initialized(engine)) 903 if (!intel_engine_initialized(engine))
@@ -946,25 +933,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
946 return 0; 933 return 0;
947} 934}
948 935
949static int intel_lr_context_do_pin(struct intel_context *ctx, 936static int intel_lr_context_pin(struct i915_gem_context *ctx,
950 struct intel_engine_cs *engine) 937 struct intel_engine_cs *engine)
951{ 938{
952 struct drm_device *dev = engine->dev; 939 struct drm_i915_private *dev_priv = ctx->i915;
953 struct drm_i915_private *dev_priv = dev->dev_private; 940 struct intel_context *ce = &ctx->engine[engine->id];
954 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
955 struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
956 void *vaddr; 941 void *vaddr;
957 u32 *lrc_reg_state; 942 u32 *lrc_reg_state;
958 int ret; 943 int ret;
959 944
960 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 945 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
961 946
962 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 947 if (ce->pin_count++)
963 PIN_OFFSET_BIAS | GUC_WOPCM_TOP); 948 return 0;
949
950 ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
951 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
964 if (ret) 952 if (ret)
965 return ret; 953 goto err;
966 954
967 vaddr = i915_gem_object_pin_map(ctx_obj); 955 vaddr = i915_gem_object_pin_map(ce->state);
968 if (IS_ERR(vaddr)) { 956 if (IS_ERR(vaddr)) {
969 ret = PTR_ERR(vaddr); 957 ret = PTR_ERR(vaddr);
970 goto unpin_ctx_obj; 958 goto unpin_ctx_obj;
@@ -972,65 +960,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
972 960
973 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 961 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
974 962
975 ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); 963 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
976 if (ret) 964 if (ret)
977 goto unpin_map; 965 goto unpin_map;
978 966
979 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); 967 i915_gem_context_reference(ctx);
968 ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
980 intel_lr_context_descriptor_update(ctx, engine); 969 intel_lr_context_descriptor_update(ctx, engine);
981 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; 970
982 ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; 971 lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
983 ctx_obj->dirty = true; 972 ce->lrc_reg_state = lrc_reg_state;
973 ce->state->dirty = true;
984 974
985 /* Invalidate GuC TLB. */ 975 /* Invalidate GuC TLB. */
986 if (i915.enable_guc_submission) 976 if (i915.enable_guc_submission)
987 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); 977 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
988 978
989 return ret; 979 return 0;
990 980
991unpin_map: 981unpin_map:
992 i915_gem_object_unpin_map(ctx_obj); 982 i915_gem_object_unpin_map(ce->state);
993unpin_ctx_obj: 983unpin_ctx_obj:
994 i915_gem_object_ggtt_unpin(ctx_obj); 984 i915_gem_object_ggtt_unpin(ce->state);
995 985err:
986 ce->pin_count = 0;
996 return ret; 987 return ret;
997} 988}
998 989
999static int intel_lr_context_pin(struct intel_context *ctx, 990void intel_lr_context_unpin(struct i915_gem_context *ctx,
1000 struct intel_engine_cs *engine) 991 struct intel_engine_cs *engine)
1001{ 992{
1002 int ret = 0; 993 struct intel_context *ce = &ctx->engine[engine->id];
1003 994
1004 if (ctx->engine[engine->id].pin_count++ == 0) { 995 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
1005 ret = intel_lr_context_do_pin(ctx, engine); 996 GEM_BUG_ON(ce->pin_count == 0);
1006 if (ret)
1007 goto reset_pin_count;
1008 997
1009 i915_gem_context_reference(ctx); 998 if (--ce->pin_count)
1010 } 999 return;
1011 return ret;
1012 1000
1013reset_pin_count: 1001 intel_unpin_ringbuffer_obj(ce->ringbuf);
1014 ctx->engine[engine->id].pin_count = 0;
1015 return ret;
1016}
1017 1002
1018void intel_lr_context_unpin(struct intel_context *ctx, 1003 i915_gem_object_unpin_map(ce->state);
1019 struct intel_engine_cs *engine) 1004 i915_gem_object_ggtt_unpin(ce->state);
1020{
1021 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1022 1005
1023 WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); 1006 ce->lrc_vma = NULL;
1024 if (--ctx->engine[engine->id].pin_count == 0) { 1007 ce->lrc_desc = 0;
1025 i915_gem_object_unpin_map(ctx_obj); 1008 ce->lrc_reg_state = NULL;
1026 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
1027 i915_gem_object_ggtt_unpin(ctx_obj);
1028 ctx->engine[engine->id].lrc_vma = NULL;
1029 ctx->engine[engine->id].lrc_desc = 0;
1030 ctx->engine[engine->id].lrc_reg_state = NULL;
1031 1009
1032 i915_gem_context_unreference(ctx); 1010 i915_gem_context_unreference(ctx);
1033 }
1034} 1011}
1035 1012
1036static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) 1013static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
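The pin/unpin pair above is the standard "first pin does the work" refcount shape, now with lockdep_assert_held() documenting the struct_mutex requirement and with the error path zeroing pin_count so a failed first pin cannot leave the count stuck at one. Reduced to its skeleton, with do_map()/do_unmap() standing in for the pin+map and unmap+unpin sequences:

struct ctx_state {
	unsigned int pin_count;
	int mapped;
};

static int do_map(struct ctx_state *ce)
{
	ce->mapped = 1;
	return 0;
}

static void do_unmap(struct ctx_state *ce)
{
	ce->mapped = 0;
}

static int ctx_pin(struct ctx_state *ce)
{
	int ret;

	if (ce->pin_count++)	/* already pinned: just count it */
		return 0;

	ret = do_map(ce);	/* first pin does the real work */
	if (ret)
		goto err;
	return 0;

err:
	ce->pin_count = 0;	/* a failed first pin leaves count at 0 */
	return ret;
}

static void ctx_unpin(struct ctx_state *ce)
{
	if (--ce->pin_count)
		return;
	do_unmap(ce);		/* last unpin tears everything down */
}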
@@ -1038,9 +1015,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1038 int ret, i; 1015 int ret, i;
1039 struct intel_engine_cs *engine = req->engine; 1016 struct intel_engine_cs *engine = req->engine;
1040 struct intel_ringbuffer *ringbuf = req->ringbuf; 1017 struct intel_ringbuffer *ringbuf = req->ringbuf;
1041 struct drm_device *dev = engine->dev; 1018 struct i915_workarounds *w = &req->i915->workarounds;
1042 struct drm_i915_private *dev_priv = dev->dev_private;
1043 struct i915_workarounds *w = &dev_priv->workarounds;
1044 1019
1045 if (w->count == 0) 1020 if (w->count == 0)
1046 return 0; 1021 return 0;
@@ -1111,7 +1086,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
1111 * this batch updates GEN8_L3SQCREG4 with default value we need to 1086 * this batch updates GEN8_L3SQCREG4 with default value we need to
1112 * set this bit here to retain the WA during flush. 1087 * set this bit here to retain the WA during flush.
1113 */ 1088 */
1114 if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0)) 1089 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0))
1115 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 1090 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1116 1091
1117 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 1092 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1200,7 +1175,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1200 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1175 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1201 1176
1202 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1177 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1203 if (IS_BROADWELL(engine->dev)) { 1178 if (IS_BROADWELL(engine->i915)) {
1204 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); 1179 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1205 if (rc < 0) 1180 if (rc < 0)
1206 return rc; 1181 return rc;
@@ -1272,12 +1247,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1272 uint32_t *offset) 1247 uint32_t *offset)
1273{ 1248{
1274 int ret; 1249 int ret;
1275 struct drm_device *dev = engine->dev;
1276 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1250 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1277 1251
1278 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1252 /* WaDisableCtxRestoreArbitration:skl,bxt */
1279 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1253 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1280 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1254 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1281 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1255 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1282 1256
1283 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1257 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1298,12 +1272,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1298 uint32_t *const batch, 1272 uint32_t *const batch,
1299 uint32_t *offset) 1273 uint32_t *offset)
1300{ 1274{
1301 struct drm_device *dev = engine->dev;
1302 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1275 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1303 1276
1304 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 1277 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1305 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 1278 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1306 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1279 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1307 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1280 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1308 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); 1281 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1309 wa_ctx_emit(batch, index, 1282 wa_ctx_emit(batch, index,
@@ -1312,7 +1285,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1312 } 1285 }
1313 1286
1314 /* WaClearTdlStateAckDirtyBits:bxt */ 1287 /* WaClearTdlStateAckDirtyBits:bxt */
1315 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { 1288 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
1316 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); 1289 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1317 1290
1318 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); 1291 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1331,8 +1304,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1331 } 1304 }
1332 1305
1333 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1306 /* WaDisableCtxRestoreArbitration:skl,bxt */
1334 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1307 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1335 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1308 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1336 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1309 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1337 1310
1338 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1311 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1344,11 +1317,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1344{ 1317{
1345 int ret; 1318 int ret;
1346 1319
1347 engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev, 1320 engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
1348 PAGE_ALIGN(size)); 1321 PAGE_ALIGN(size));
1349 if (!engine->wa_ctx.obj) { 1322 if (IS_ERR(engine->wa_ctx.obj)) {
1350 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); 1323 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1351 return -ENOMEM; 1324 ret = PTR_ERR(engine->wa_ctx.obj);
1325 engine->wa_ctx.obj = NULL;
1326 return ret;
1352 } 1327 }
1353 1328
1354 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); 1329 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
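i915_gem_object_create() reports failure through the pointer itself (the kernel's ERR_PTR convention) where i915_gem_alloc_object() returned NULL, which is why the hunk above gains the IS_ERR()/PTR_ERR() handling. The convention in miniature, matching the kernel macros; create_obj() is a made-up stand-in:

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* error codes live in the top page of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_obj(int fail)
{
	static char obj;

	return fail ? ERR_PTR(-12 /* ENOMEM */) : &obj;
}

int main(void)
{
	void *obj = create_obj(1);

	if (IS_ERR(obj))
		return (int)-PTR_ERR(obj);	/* exits with 12 */
	return 0;
}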
@@ -1382,9 +1357,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1382 WARN_ON(engine->id != RCS); 1357 WARN_ON(engine->id != RCS);
1383 1358
1384 /* update this when WA for higher Gen are added */ 1359 /* update this when WA for higher Gen are added */
1385 if (INTEL_INFO(engine->dev)->gen > 9) { 1360 if (INTEL_GEN(engine->i915) > 9) {
1386 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", 1361 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1387 INTEL_INFO(engine->dev)->gen); 1362 INTEL_GEN(engine->i915));
1388 return 0; 1363 return 0;
1389 } 1364 }
1390 1365
@@ -1404,7 +1379,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1404 batch = kmap_atomic(page); 1379 batch = kmap_atomic(page);
1405 offset = 0; 1380 offset = 0;
1406 1381
1407 if (INTEL_INFO(engine->dev)->gen == 8) { 1382 if (IS_GEN8(engine->i915)) {
1408 ret = gen8_init_indirectctx_bb(engine, 1383 ret = gen8_init_indirectctx_bb(engine,
1409 &wa_ctx->indirect_ctx, 1384 &wa_ctx->indirect_ctx,
1410 batch, 1385 batch,
@@ -1418,7 +1393,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1418 &offset); 1393 &offset);
1419 if (ret) 1394 if (ret)
1420 goto out; 1395 goto out;
1421 } else if (INTEL_INFO(engine->dev)->gen == 9) { 1396 } else if (IS_GEN9(engine->i915)) {
1422 ret = gen9_init_indirectctx_bb(engine, 1397 ret = gen9_init_indirectctx_bb(engine,
1423 &wa_ctx->indirect_ctx, 1398 &wa_ctx->indirect_ctx,
1424 batch, 1399 batch,
@@ -1444,7 +1419,7 @@ out:
1444 1419
1445static void lrc_init_hws(struct intel_engine_cs *engine) 1420static void lrc_init_hws(struct intel_engine_cs *engine)
1446{ 1421{
1447 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1422 struct drm_i915_private *dev_priv = engine->i915;
1448 1423
1449 I915_WRITE(RING_HWS_PGA(engine->mmio_base), 1424 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1450 (u32)engine->status_page.gfx_addr); 1425 (u32)engine->status_page.gfx_addr);
@@ -1453,8 +1428,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
1453 1428
1454static int gen8_init_common_ring(struct intel_engine_cs *engine) 1429static int gen8_init_common_ring(struct intel_engine_cs *engine)
1455{ 1430{
1456 struct drm_device *dev = engine->dev; 1431 struct drm_i915_private *dev_priv = engine->i915;
1457 struct drm_i915_private *dev_priv = dev->dev_private;
1458 unsigned int next_context_status_buffer_hw; 1432 unsigned int next_context_status_buffer_hw;
1459 1433
1460 lrc_init_hws(engine); 1434 lrc_init_hws(engine);
@@ -1501,8 +1475,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
1501 1475
1502static int gen8_init_render_ring(struct intel_engine_cs *engine) 1476static int gen8_init_render_ring(struct intel_engine_cs *engine)
1503{ 1477{
1504 struct drm_device *dev = engine->dev; 1478 struct drm_i915_private *dev_priv = engine->i915;
1505 struct drm_i915_private *dev_priv = dev->dev_private;
1506 int ret; 1479 int ret;
1507 1480
1508 ret = gen8_init_common_ring(engine); 1481 ret = gen8_init_common_ring(engine);
@@ -1579,7 +1552,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1579 if (req->ctx->ppgtt && 1552 if (req->ctx->ppgtt &&
1580 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { 1553 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1581 if (!USES_FULL_48BIT_PPGTT(req->i915) && 1554 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1582 !intel_vgpu_active(req->i915->dev)) { 1555 !intel_vgpu_active(req->i915)) {
1583 ret = intel_logical_ring_emit_pdps(req); 1556 ret = intel_logical_ring_emit_pdps(req);
1584 if (ret) 1557 if (ret)
1585 return ret; 1558 return ret;
@@ -1607,8 +1580,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1607 1580
1608static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) 1581static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1609{ 1582{
1610 struct drm_device *dev = engine->dev; 1583 struct drm_i915_private *dev_priv = engine->i915;
1611 struct drm_i915_private *dev_priv = dev->dev_private;
1612 unsigned long flags; 1584 unsigned long flags;
1613 1585
1614 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1586 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1627,8 +1599,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1627 1599
1628static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) 1600static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
1629{ 1601{
1630 struct drm_device *dev = engine->dev; 1602 struct drm_i915_private *dev_priv = engine->i915;
1631 struct drm_i915_private *dev_priv = dev->dev_private;
1632 unsigned long flags; 1603 unsigned long flags;
1633 1604
1634 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1605 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1645,8 +1616,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
1645{ 1616{
1646 struct intel_ringbuffer *ringbuf = request->ringbuf; 1617 struct intel_ringbuffer *ringbuf = request->ringbuf;
1647 struct intel_engine_cs *engine = ringbuf->engine; 1618 struct intel_engine_cs *engine = ringbuf->engine;
1648 struct drm_device *dev = engine->dev; 1619 struct drm_i915_private *dev_priv = request->i915;
1649 struct drm_i915_private *dev_priv = dev->dev_private;
1650 uint32_t cmd; 1620 uint32_t cmd;
1651 int ret; 1621 int ret;
1652 1622
@@ -1714,7 +1684,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1714 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 1684 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1715 * pipe control. 1685 * pipe control.
1716 */ 1686 */
1717 if (IS_GEN9(engine->dev)) 1687 if (IS_GEN9(request->i915))
1718 vf_flush_wa = true; 1688 vf_flush_wa = true;
1719 } 1689 }
1720 1690
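Note: per the comment in this hunk, gen9 requires a null PIPE_CONTROL to be emitted before the one carrying VF_CACHE_INVALIDATE. A hedged standalone sketch of that emission order; emit_pipe_control() is a hypothetical stand-in for the driver's dword emission, and the 0x4 flag value is a placeholder:

	#include <stdint.h>
	#include <stdio.h>

	struct ring { int unused; };

	/* Hypothetical stand-in for the driver's PIPE_CONTROL emission. */
	static void emit_pipe_control(struct ring *r, uint32_t flags, uint64_t addr)
	{
		printf("PIPE_CONTROL flags=0x%08x addr=0x%llx\n",
		       flags, (unsigned long long)addr);
	}

	static void emit_flush_render_sketch(struct ring *r, uint32_t flags,
					     uint64_t scratch, int vf_flush_wa)
	{
		if (vf_flush_wa)	/* gen9: null pipe control goes first */
			emit_pipe_control(r, 0, 0);
		emit_pipe_control(r, flags, scratch);
	}

	int main(void)
	{
		struct ring r;

		emit_flush_render_sketch(&r, 0x4 /* placeholder for VF_CACHE_INVALIDATE */,
					 0x1000, 1);
		return 0;
	}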
@@ -1782,11 +1752,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1782 */ 1752 */
1783#define WA_TAIL_DWORDS 2 1753#define WA_TAIL_DWORDS 2
1784 1754
1785static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
1786{
1787 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
1788}
1789
1790static int gen8_emit_request(struct drm_i915_gem_request *request) 1755static int gen8_emit_request(struct drm_i915_gem_request *request)
1791{ 1756{
1792 struct intel_ringbuffer *ringbuf = request->ringbuf; 1757 struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1802,7 +1767,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
1802 intel_logical_ring_emit(ringbuf, 1767 intel_logical_ring_emit(ringbuf,
1803 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); 1768 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1804 intel_logical_ring_emit(ringbuf, 1769 intel_logical_ring_emit(ringbuf,
1805 hws_seqno_address(request->engine) | 1770 intel_hws_seqno_address(request->engine) |
1806 MI_FLUSH_DW_USE_GTT); 1771 MI_FLUSH_DW_USE_GTT);
1807 intel_logical_ring_emit(ringbuf, 0); 1772 intel_logical_ring_emit(ringbuf, 0);
1808 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1773 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
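Note: the static hws_seqno_address() helper deleted two hunks up is replaced by intel_hws_seqno_address(); presumably the same computation was hoisted to a header shared with the legacy ringbuffer code. A sketch that mirrors the deleted body, on the assumption the shared helper is unchanged:

	/* Mirrors the deleted static helper; assumed to live in a shared header. */
	static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
	{
		return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
	}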
@@ -1832,7 +1797,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1832 (PIPE_CONTROL_GLOBAL_GTT_IVB | 1797 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1833 PIPE_CONTROL_CS_STALL | 1798 PIPE_CONTROL_CS_STALL |
1834 PIPE_CONTROL_QW_WRITE)); 1799 PIPE_CONTROL_QW_WRITE));
1835 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); 1800 intel_logical_ring_emit(ringbuf,
1801 intel_hws_seqno_address(request->engine));
1836 intel_logical_ring_emit(ringbuf, 0); 1802 intel_logical_ring_emit(ringbuf, 0);
1837 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1803 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1838 /* We're thrashing one dword of HWS. */ 1804 /* We're thrashing one dword of HWS. */
@@ -1911,7 +1877,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1911 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) 1877 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1912 tasklet_kill(&engine->irq_tasklet); 1878 tasklet_kill(&engine->irq_tasklet);
1913 1879
1914 dev_priv = engine->dev->dev_private; 1880 dev_priv = engine->i915;
1915 1881
1916 if (engine->buffer) { 1882 if (engine->buffer) {
1917 intel_logical_ring_stop(engine); 1883 intel_logical_ring_stop(engine);
@@ -1928,18 +1894,18 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1928 i915_gem_object_unpin_map(engine->status_page.obj); 1894 i915_gem_object_unpin_map(engine->status_page.obj);
1929 engine->status_page.obj = NULL; 1895 engine->status_page.obj = NULL;
1930 } 1896 }
1897 intel_lr_context_unpin(dev_priv->kernel_context, engine);
1931 1898
1932 engine->idle_lite_restore_wa = 0; 1899 engine->idle_lite_restore_wa = 0;
1933 engine->disable_lite_restore_wa = false; 1900 engine->disable_lite_restore_wa = false;
1934 engine->ctx_desc_template = 0; 1901 engine->ctx_desc_template = 0;
1935 1902
1936 lrc_destroy_wa_ctx_obj(engine); 1903 lrc_destroy_wa_ctx_obj(engine);
1937 engine->dev = NULL; 1904 engine->i915 = NULL;
1938} 1905}
1939 1906
1940static void 1907static void
1941logical_ring_default_vfuncs(struct drm_device *dev, 1908logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1942 struct intel_engine_cs *engine)
1943{ 1909{
1944 /* Default vfuncs which can be overridden by each engine. */ 1910 /* Default vfuncs which can be overridden by each engine. */
1945 engine->init_hw = gen8_init_common_ring; 1911 engine->init_hw = gen8_init_common_ring;
@@ -1950,7 +1916,7 @@ logical_ring_default_vfuncs(struct drm_device *dev,
1950 engine->emit_bb_start = gen8_emit_bb_start; 1916 engine->emit_bb_start = gen8_emit_bb_start;
1951 engine->get_seqno = gen8_get_seqno; 1917 engine->get_seqno = gen8_get_seqno;
1952 engine->set_seqno = gen8_set_seqno; 1918 engine->set_seqno = gen8_set_seqno;
1953 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1919 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1954 engine->irq_seqno_barrier = bxt_a_seqno_barrier; 1920 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
1955 engine->set_seqno = bxt_a_set_seqno; 1921 engine->set_seqno = bxt_a_set_seqno;
1956 } 1922 }
@@ -1961,6 +1927,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
1961{ 1927{
1962 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 1928 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1963 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 1929 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
1930 init_waitqueue_head(&engine->irq_queue);
1964} 1931}
1965 1932
1966static int 1933static int
@@ -1981,32 +1948,68 @@ lrc_setup_hws(struct intel_engine_cs *engine,
1981 return 0; 1948 return 0;
1982} 1949}
1983 1950
1984static int 1951static const struct logical_ring_info {
1985logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) 1952 const char *name;
1953 unsigned exec_id;
1954 unsigned guc_id;
1955 u32 mmio_base;
1956 unsigned irq_shift;
1957} logical_rings[] = {
1958 [RCS] = {
1959 .name = "render ring",
1960 .exec_id = I915_EXEC_RENDER,
1961 .guc_id = GUC_RENDER_ENGINE,
1962 .mmio_base = RENDER_RING_BASE,
1963 .irq_shift = GEN8_RCS_IRQ_SHIFT,
1964 },
1965 [BCS] = {
1966 .name = "blitter ring",
1967 .exec_id = I915_EXEC_BLT,
1968 .guc_id = GUC_BLITTER_ENGINE,
1969 .mmio_base = BLT_RING_BASE,
1970 .irq_shift = GEN8_BCS_IRQ_SHIFT,
1971 },
1972 [VCS] = {
1973 .name = "bsd ring",
1974 .exec_id = I915_EXEC_BSD,
1975 .guc_id = GUC_VIDEO_ENGINE,
1976 .mmio_base = GEN6_BSD_RING_BASE,
1977 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
1978 },
1979 [VCS2] = {
1980 .name = "bsd2 ring",
1981 .exec_id = I915_EXEC_BSD,
1982 .guc_id = GUC_VIDEO_ENGINE2,
1983 .mmio_base = GEN8_BSD2_RING_BASE,
1984 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
1985 },
1986 [VECS] = {
1987 .name = "video enhancement ring",
1988 .exec_id = I915_EXEC_VEBOX,
1989 .guc_id = GUC_VIDEOENHANCE_ENGINE,
1990 .mmio_base = VEBOX_RING_BASE,
1991 .irq_shift = GEN8_VECS_IRQ_SHIFT,
1992 },
1993};
1994
1995static struct intel_engine_cs *
1996logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
1986{ 1997{
1998 const struct logical_ring_info *info = &logical_rings[id];
1987 struct drm_i915_private *dev_priv = to_i915(dev); 1999 struct drm_i915_private *dev_priv = to_i915(dev);
1988 struct intel_context *dctx = dev_priv->kernel_context; 2000 struct intel_engine_cs *engine = &dev_priv->engine[id];
1989 enum forcewake_domains fw_domains; 2001 enum forcewake_domains fw_domains;
1990 int ret;
1991
1992 /* Intentionally left blank. */
1993 engine->buffer = NULL;
1994
1995 engine->dev = dev;
1996 INIT_LIST_HEAD(&engine->active_list);
1997 INIT_LIST_HEAD(&engine->request_list);
1998 i915_gem_batch_pool_init(dev, &engine->batch_pool);
1999 init_waitqueue_head(&engine->irq_queue);
2000 2002
2001 INIT_LIST_HEAD(&engine->buffers); 2003 engine->id = id;
2002 INIT_LIST_HEAD(&engine->execlist_queue); 2004 engine->name = info->name;
2003 INIT_LIST_HEAD(&engine->execlist_retired_req_list); 2005 engine->exec_id = info->exec_id;
2004 spin_lock_init(&engine->execlist_lock); 2006 engine->guc_id = info->guc_id;
2007 engine->mmio_base = info->mmio_base;
2005 2008
2006 tasklet_init(&engine->irq_tasklet, 2009 engine->i915 = dev_priv;
2007 intel_lrc_irq_handler, (unsigned long)engine);
2008 2010
2009 logical_ring_init_platform_invariants(engine); 2011 /* Intentionally left blank. */
2012 engine->buffer = NULL;
2010 2013
2011 fw_domains = intel_uncore_forcewake_for_reg(dev_priv, 2014 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2012 RING_ELSP(engine), 2015 RING_ELSP(engine),
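Note: the new logical_rings[] table replaces per-engine constants that were previously open-coded in five nearly identical init functions (see the deletions further down). A standalone sketch of the same table-driven pattern; the mmio bases and irq shifts here are illustrative placeholders, not values from i915_reg.h:

	#include <stdio.h>

	enum engine_id { RCS, BCS, VCS, VCS2, VECS, NUM_ENGINES };

	struct engine_info {
		const char *name;
		unsigned int mmio_base;	/* placeholder values */
		unsigned int irq_shift;	/* placeholder values */
	};

	static const struct engine_info engines[NUM_ENGINES] = {
		[RCS]  = { "render ring",		0x02000,  0 },
		[BCS]  = { "blitter ring",		0x22000, 16 },
		[VCS]  = { "bsd ring",			0x12000,  0 },
		[VCS2] = { "bsd2 ring",			0x1c000, 16 },
		[VECS] = { "video enhancement ring",	0x1a000,  0 },
	};

	static void setup_engine(enum engine_id id)
	{
		const struct engine_info *info = &engines[id];

		printf("%s: mmio 0x%05x, irq shift %u\n",
		       info->name, info->mmio_base, info->irq_shift);
	}

	int main(void)
	{
		for (int id = 0; id < NUM_ENGINES; id++)
			setup_engine(id);
		return 0;
	}

Adding a sixth engine then means adding one table entry rather than one more copy of the init boilerplate.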
@@ -2022,20 +2025,44 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
2022 2025
2023 engine->fw_domains = fw_domains; 2026 engine->fw_domains = fw_domains;
2024 2027
2028 INIT_LIST_HEAD(&engine->active_list);
2029 INIT_LIST_HEAD(&engine->request_list);
2030 INIT_LIST_HEAD(&engine->buffers);
2031 INIT_LIST_HEAD(&engine->execlist_queue);
2032 spin_lock_init(&engine->execlist_lock);
2033
2034 tasklet_init(&engine->irq_tasklet,
2035 intel_lrc_irq_handler, (unsigned long)engine);
2036
2037 logical_ring_init_platform_invariants(engine);
2038 logical_ring_default_vfuncs(engine);
2039 logical_ring_default_irqs(engine, info->irq_shift);
2040
2041 intel_engine_init_hangcheck(engine);
2042 i915_gem_batch_pool_init(dev, &engine->batch_pool);
2043
2044 return engine;
2045}
2046
2047static int
2048logical_ring_init(struct intel_engine_cs *engine)
2049{
2050 struct i915_gem_context *dctx = engine->i915->kernel_context;
2051 int ret;
2052
2025 ret = i915_cmd_parser_init_ring(engine); 2053 ret = i915_cmd_parser_init_ring(engine);
2026 if (ret) 2054 if (ret)
2027 goto error; 2055 goto error;
2028 2056
2029 ret = intel_lr_context_deferred_alloc(dctx, engine); 2057 ret = execlists_context_deferred_alloc(dctx, engine);
2030 if (ret) 2058 if (ret)
2031 goto error; 2059 goto error;
2032 2060
2033 /* As this is the default context, always pin it */ 2061 /* As this is the default context, always pin it */
2034 ret = intel_lr_context_do_pin(dctx, engine); 2062 ret = intel_lr_context_pin(dctx, engine);
2035 if (ret) { 2063 if (ret) {
2036 DRM_ERROR( 2064 DRM_ERROR("Failed to pin context for %s: %d\n",
2037 "Failed to pin and map ringbuffer %s: %d\n", 2065 engine->name, ret);
2038 engine->name, ret);
2039 goto error; 2066 goto error;
2040 } 2067 }
2041 2068
@@ -2055,22 +2082,12 @@ error:
2055 2082
2056static int logical_render_ring_init(struct drm_device *dev) 2083static int logical_render_ring_init(struct drm_device *dev)
2057{ 2084{
2058 struct drm_i915_private *dev_priv = dev->dev_private; 2085 struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
2059 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
2060 int ret; 2086 int ret;
2061 2087
2062 engine->name = "render ring";
2063 engine->id = RCS;
2064 engine->exec_id = I915_EXEC_RENDER;
2065 engine->guc_id = GUC_RENDER_ENGINE;
2066 engine->mmio_base = RENDER_RING_BASE;
2067
2068 logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
2069 if (HAS_L3_DPF(dev)) 2088 if (HAS_L3_DPF(dev))
2070 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2089 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2071 2090
2072 logical_ring_default_vfuncs(dev, engine);
2073
2074 /* Override some for render ring. */ 2091 /* Override some for render ring. */
2075 if (INTEL_INFO(dev)->gen >= 9) 2092 if (INTEL_INFO(dev)->gen >= 9)
2076 engine->init_hw = gen9_init_render_ring; 2093 engine->init_hw = gen9_init_render_ring;
@@ -2081,8 +2098,6 @@ static int logical_render_ring_init(struct drm_device *dev)
2081 engine->emit_flush = gen8_emit_flush_render; 2098 engine->emit_flush = gen8_emit_flush_render;
2082 engine->emit_request = gen8_emit_request_render; 2099 engine->emit_request = gen8_emit_request_render;
2083 2100
2084 engine->dev = dev;
2085
2086 ret = intel_init_pipe_control(engine); 2101 ret = intel_init_pipe_control(engine);
2087 if (ret) 2102 if (ret)
2088 return ret; 2103 return ret;
@@ -2098,7 +2113,7 @@ static int logical_render_ring_init(struct drm_device *dev)
2098 ret); 2113 ret);
2099 } 2114 }
2100 2115
2101 ret = logical_ring_init(dev, engine); 2116 ret = logical_ring_init(engine);
2102 if (ret) { 2117 if (ret) {
2103 lrc_destroy_wa_ctx_obj(engine); 2118 lrc_destroy_wa_ctx_obj(engine);
2104 } 2119 }
@@ -2108,70 +2123,30 @@ static int logical_render_ring_init(struct drm_device *dev)
2108 2123
2109static int logical_bsd_ring_init(struct drm_device *dev) 2124static int logical_bsd_ring_init(struct drm_device *dev)
2110{ 2125{
2111 struct drm_i915_private *dev_priv = dev->dev_private; 2126 struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
2112 struct intel_engine_cs *engine = &dev_priv->engine[VCS];
2113
2114 engine->name = "bsd ring";
2115 engine->id = VCS;
2116 engine->exec_id = I915_EXEC_BSD;
2117 engine->guc_id = GUC_VIDEO_ENGINE;
2118 engine->mmio_base = GEN6_BSD_RING_BASE;
2119
2120 logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
2121 logical_ring_default_vfuncs(dev, engine);
2122 2127
2123 return logical_ring_init(dev, engine); 2128 return logical_ring_init(engine);
2124} 2129}
2125 2130
2126static int logical_bsd2_ring_init(struct drm_device *dev) 2131static int logical_bsd2_ring_init(struct drm_device *dev)
2127{ 2132{
2128 struct drm_i915_private *dev_priv = dev->dev_private; 2133 struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
2129 struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
2130
2131 engine->name = "bsd2 ring";
2132 engine->id = VCS2;
2133 engine->exec_id = I915_EXEC_BSD;
2134 engine->guc_id = GUC_VIDEO_ENGINE2;
2135 engine->mmio_base = GEN8_BSD2_RING_BASE;
2136
2137 logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
2138 logical_ring_default_vfuncs(dev, engine);
2139 2134
2140 return logical_ring_init(dev, engine); 2135 return logical_ring_init(engine);
2141} 2136}
2142 2137
2143static int logical_blt_ring_init(struct drm_device *dev) 2138static int logical_blt_ring_init(struct drm_device *dev)
2144{ 2139{
2145 struct drm_i915_private *dev_priv = dev->dev_private; 2140 struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
2146 struct intel_engine_cs *engine = &dev_priv->engine[BCS];
2147
2148 engine->name = "blitter ring";
2149 engine->id = BCS;
2150 engine->exec_id = I915_EXEC_BLT;
2151 engine->guc_id = GUC_BLITTER_ENGINE;
2152 engine->mmio_base = BLT_RING_BASE;
2153
2154 logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
2155 logical_ring_default_vfuncs(dev, engine);
2156 2141
2157 return logical_ring_init(dev, engine); 2142 return logical_ring_init(engine);
2158} 2143}
2159 2144
2160static int logical_vebox_ring_init(struct drm_device *dev) 2145static int logical_vebox_ring_init(struct drm_device *dev)
2161{ 2146{
2162 struct drm_i915_private *dev_priv = dev->dev_private; 2147 struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
2163 struct intel_engine_cs *engine = &dev_priv->engine[VECS];
2164
2165 engine->name = "video enhancement ring";
2166 engine->id = VECS;
2167 engine->exec_id = I915_EXEC_VEBOX;
2168 engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
2169 engine->mmio_base = VEBOX_RING_BASE;
2170 2148
2171 logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT); 2149 return logical_ring_init(engine);
2172 logical_ring_default_vfuncs(dev, engine);
2173
2174 return logical_ring_init(dev, engine);
2175} 2150}
2176 2151
2177/** 2152/**
@@ -2232,7 +2207,7 @@ cleanup_render_ring:
2232} 2207}
2233 2208
2234static u32 2209static u32
2235make_rpcs(struct drm_device *dev) 2210make_rpcs(struct drm_i915_private *dev_priv)
2236{ 2211{
2237 u32 rpcs = 0; 2212 u32 rpcs = 0;
2238 2213
@@ -2240,7 +2215,7 @@ make_rpcs(struct drm_device *dev)
2240 * No explicit RPCS request is needed to ensure full 2215 * No explicit RPCS request is needed to ensure full
2241 * slice/subslice/EU enablement prior to Gen9. 2216 * slice/subslice/EU enablement prior to Gen9.
2242 */ 2217 */
2243 if (INTEL_INFO(dev)->gen < 9) 2218 if (INTEL_GEN(dev_priv) < 9)
2244 return 0; 2219 return 0;
2245 2220
2246 /* 2221 /*
@@ -2249,24 +2224,24 @@ make_rpcs(struct drm_device *dev)
2249 * must make an explicit request through RPCS for full 2224 * must make an explicit request through RPCS for full
2250 * enablement. 2225 * enablement.
2251 */ 2226 */
2252 if (INTEL_INFO(dev)->has_slice_pg) { 2227 if (INTEL_INFO(dev_priv)->has_slice_pg) {
2253 rpcs |= GEN8_RPCS_S_CNT_ENABLE; 2228 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2254 rpcs |= INTEL_INFO(dev)->slice_total << 2229 rpcs |= INTEL_INFO(dev_priv)->slice_total <<
2255 GEN8_RPCS_S_CNT_SHIFT; 2230 GEN8_RPCS_S_CNT_SHIFT;
2256 rpcs |= GEN8_RPCS_ENABLE; 2231 rpcs |= GEN8_RPCS_ENABLE;
2257 } 2232 }
2258 2233
2259 if (INTEL_INFO(dev)->has_subslice_pg) { 2234 if (INTEL_INFO(dev_priv)->has_subslice_pg) {
2260 rpcs |= GEN8_RPCS_SS_CNT_ENABLE; 2235 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2261 rpcs |= INTEL_INFO(dev)->subslice_per_slice << 2236 rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
2262 GEN8_RPCS_SS_CNT_SHIFT; 2237 GEN8_RPCS_SS_CNT_SHIFT;
2263 rpcs |= GEN8_RPCS_ENABLE; 2238 rpcs |= GEN8_RPCS_ENABLE;
2264 } 2239 }
2265 2240
2266 if (INTEL_INFO(dev)->has_eu_pg) { 2241 if (INTEL_INFO(dev_priv)->has_eu_pg) {
2267 rpcs |= INTEL_INFO(dev)->eu_per_subslice << 2242 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2268 GEN8_RPCS_EU_MIN_SHIFT; 2243 GEN8_RPCS_EU_MIN_SHIFT;
2269 rpcs |= INTEL_INFO(dev)->eu_per_subslice << 2244 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2270 GEN8_RPCS_EU_MAX_SHIFT; 2245 GEN8_RPCS_EU_MAX_SHIFT;
2271 rpcs |= GEN8_RPCS_ENABLE; 2246 rpcs |= GEN8_RPCS_ENABLE;
2272 } 2247 }
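Note: make_rpcs() packs the slice/subslice/EU counts into a single register value. A standalone rerun of the same packing logic with a hypothetical 3-slice, 4-subslice, 8-EU part; the bit positions below are placeholders, the authoritative GEN8_RPCS_* definitions live in i915_reg.h:

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder bit layout, not the authoritative i915_reg.h values. */
	#define RPCS_ENABLE		(1u << 31)
	#define RPCS_S_CNT_ENABLE	(1u << 18)
	#define RPCS_S_CNT_SHIFT	15
	#define RPCS_SS_CNT_ENABLE	(1u << 11)
	#define RPCS_SS_CNT_SHIFT	8
	#define RPCS_EU_MAX_SHIFT	4
	#define RPCS_EU_MIN_SHIFT	0

	struct sseu_info {
		int has_slice_pg, has_subslice_pg, has_eu_pg;
		uint32_t slice_total, subslice_per_slice, eu_per_subslice;
	};

	static uint32_t make_rpcs_sketch(const struct sseu_info *info)
	{
		uint32_t rpcs = 0;

		if (info->has_slice_pg) {
			rpcs |= RPCS_S_CNT_ENABLE;
			rpcs |= info->slice_total << RPCS_S_CNT_SHIFT;
			rpcs |= RPCS_ENABLE;
		}
		if (info->has_subslice_pg) {
			rpcs |= RPCS_SS_CNT_ENABLE;
			rpcs |= info->subslice_per_slice << RPCS_SS_CNT_SHIFT;
			rpcs |= RPCS_ENABLE;
		}
		if (info->has_eu_pg) {
			rpcs |= info->eu_per_subslice << RPCS_EU_MIN_SHIFT;
			rpcs |= info->eu_per_subslice << RPCS_EU_MAX_SHIFT;
			rpcs |= RPCS_ENABLE;
		}
		return rpcs;
	}

	int main(void)
	{
		struct sseu_info part = { 1, 1, 1, 3, 4, 8 }; /* hypothetical gen9 part */

		printf("RPCS = 0x%08x\n", make_rpcs_sketch(&part));
		return 0;
	}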
@@ -2278,9 +2253,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2278{ 2253{
2279 u32 indirect_ctx_offset; 2254 u32 indirect_ctx_offset;
2280 2255
2281 switch (INTEL_INFO(engine->dev)->gen) { 2256 switch (INTEL_GEN(engine->i915)) {
2282 default: 2257 default:
2283 MISSING_CASE(INTEL_INFO(engine->dev)->gen); 2258 MISSING_CASE(INTEL_GEN(engine->i915));
2284 /* fall through */ 2259 /* fall through */
2285 case 9: 2260 case 9:
2286 indirect_ctx_offset = 2261 indirect_ctx_offset =
@@ -2296,13 +2271,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2296} 2271}
2297 2272
2298static int 2273static int
2299populate_lr_context(struct intel_context *ctx, 2274populate_lr_context(struct i915_gem_context *ctx,
2300 struct drm_i915_gem_object *ctx_obj, 2275 struct drm_i915_gem_object *ctx_obj,
2301 struct intel_engine_cs *engine, 2276 struct intel_engine_cs *engine,
2302 struct intel_ringbuffer *ringbuf) 2277 struct intel_ringbuffer *ringbuf)
2303{ 2278{
2304 struct drm_device *dev = engine->dev; 2279 struct drm_i915_private *dev_priv = ctx->i915;
2305 struct drm_i915_private *dev_priv = dev->dev_private;
2306 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2280 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2307 void *vaddr; 2281 void *vaddr;
2308 u32 *reg_state; 2282 u32 *reg_state;
@@ -2340,7 +2314,7 @@ populate_lr_context(struct intel_context *ctx,
2340 RING_CONTEXT_CONTROL(engine), 2314 RING_CONTEXT_CONTROL(engine),
2341 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 2315 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2342 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2316 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2343 (HAS_RESOURCE_STREAMER(dev) ? 2317 (HAS_RESOURCE_STREAMER(dev_priv) ?
2344 CTX_CTRL_RS_CTX_ENABLE : 0))); 2318 CTX_CTRL_RS_CTX_ENABLE : 0)));
2345 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 2319 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2346 0); 2320 0);
@@ -2429,7 +2403,7 @@ populate_lr_context(struct intel_context *ctx,
2429 if (engine->id == RCS) { 2403 if (engine->id == RCS) {
2430 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2404 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2431 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 2405 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2432 make_rpcs(dev)); 2406 make_rpcs(dev_priv));
2433 } 2407 }
2434 2408
2435 i915_gem_object_unpin_map(ctx_obj); 2409 i915_gem_object_unpin_map(ctx_obj);
@@ -2438,37 +2412,6 @@ populate_lr_context(struct intel_context *ctx,
2438} 2412}
2439 2413
2440/** 2414/**
2441 * intel_lr_context_free() - free the LRC specific bits of a context
2442 * @ctx: the LR context to free.
2443 *
2444 * The real context freeing is done in i915_gem_context_free: this only
2445 * takes care of the bits that are LRC related: the per-engine backing
2446 * objects and the logical ringbuffer.
2447 */
2448void intel_lr_context_free(struct intel_context *ctx)
2449{
2450 int i;
2451
2452 for (i = I915_NUM_ENGINES; --i >= 0; ) {
2453 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
2454 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
2455
2456 if (!ctx_obj)
2457 continue;
2458
2459 if (ctx == ctx->i915->kernel_context) {
2460 intel_unpin_ringbuffer_obj(ringbuf);
2461 i915_gem_object_ggtt_unpin(ctx_obj);
2462 i915_gem_object_unpin_map(ctx_obj);
2463 }
2464
2465 WARN_ON(ctx->engine[i].pin_count);
2466 intel_ringbuffer_free(ringbuf);
2467 drm_gem_object_unreference(&ctx_obj->base);
2468 }
2469}
2470
2471/**
2472 * intel_lr_context_size() - return the size of the context for an engine 2415 * intel_lr_context_size() - return the size of the context for an engine
2473 * @ring: which engine to find the context size for 2416 * @ring: which engine to find the context size for
2474 * 2417 *
@@ -2486,11 +2429,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2486{ 2429{
2487 int ret = 0; 2430 int ret = 0;
2488 2431
2489 WARN_ON(INTEL_INFO(engine->dev)->gen < 8); 2432 WARN_ON(INTEL_GEN(engine->i915) < 8);
2490 2433
2491 switch (engine->id) { 2434 switch (engine->id) {
2492 case RCS: 2435 case RCS:
2493 if (INTEL_INFO(engine->dev)->gen >= 9) 2436 if (INTEL_GEN(engine->i915) >= 9)
2494 ret = GEN9_LR_CONTEXT_RENDER_SIZE; 2437 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2495 else 2438 else
2496 ret = GEN8_LR_CONTEXT_RENDER_SIZE; 2439 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2507,9 +2450,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2507} 2450}
2508 2451
2509/** 2452/**
2510 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context 2453 * execlists_context_deferred_alloc() - create the LRC specific bits of a context
2511 * @ctx: LR context to create. 2454 * @ctx: LR context to create.
2512 * @ring: engine to be used with the context. 2455 * @engine: engine to be used with the context.
2513 * 2456 *
2514 * This function can be called more than once, with different engines, if we plan 2457 * This function can be called more than once, with different engines, if we plan
2515 * to use the context with them. The context backing objects and the ringbuffers 2458 * to use the context with them. The context backing objects and the ringbuffers
@@ -2519,28 +2462,26 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2519 * 2462 *
2520 * Return: non-zero on error. 2463 * Return: non-zero on error.
2521 */ 2464 */
2522 2465static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2523int intel_lr_context_deferred_alloc(struct intel_context *ctx, 2466 struct intel_engine_cs *engine)
2524 struct intel_engine_cs *engine)
2525{ 2467{
2526 struct drm_device *dev = engine->dev;
2527 struct drm_i915_gem_object *ctx_obj; 2468 struct drm_i915_gem_object *ctx_obj;
2469 struct intel_context *ce = &ctx->engine[engine->id];
2528 uint32_t context_size; 2470 uint32_t context_size;
2529 struct intel_ringbuffer *ringbuf; 2471 struct intel_ringbuffer *ringbuf;
2530 int ret; 2472 int ret;
2531 2473
2532 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); 2474 WARN_ON(ce->state);
2533 WARN_ON(ctx->engine[engine->id].state);
2534 2475
2535 context_size = round_up(intel_lr_context_size(engine), 4096); 2476 context_size = round_up(intel_lr_context_size(engine), 4096);
2536 2477
2537 /* One extra page as the sharing data between driver and GuC */ 2478 /* One extra page as the sharing data between driver and GuC */
2538 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2479 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2539 2480
2540 ctx_obj = i915_gem_alloc_object(dev, context_size); 2481 ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
2541 if (!ctx_obj) { 2482 if (IS_ERR(ctx_obj)) {
2542 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2483 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2543 return -ENOMEM; 2484 return PTR_ERR(ctx_obj);
2544 } 2485 }
2545 2486
2546 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE); 2487 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
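Note: the backing-object size computed in this hunk is the engine's LRC size rounded up to 4 KiB, plus PAGE_SIZE * LRC_PPHWSP_PN for the page(s) shared with the GuC. A standalone arithmetic check, assuming a hypothetical 22-page render context and LRC_PPHWSP_PN == 1 (per the LRC_GUCSHR_PN + 1 definition in intel_lrc.h):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096u
	#define LRC_PPHWSP_PN	1u	/* assumed: LRC_GUCSHR_PN (0) + 1 */

	static uint32_t round_up_4k(uint32_t x)
	{
		return (x + 4095u) & ~4095u;	/* round_up(x, 4096) */
	}

	int main(void)
	{
		uint32_t lr_size = 22 * PAGE_SIZE;	/* hypothetical render LRC size */
		uint32_t total = round_up_4k(lr_size) + PAGE_SIZE * LRC_PPHWSP_PN;

		printf("backing object: %u bytes (%u pages)\n",
		       total, total / PAGE_SIZE);
		return 0;
	}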
@@ -2555,48 +2496,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2555 goto error_ringbuf; 2496 goto error_ringbuf;
2556 } 2497 }
2557 2498
2558 ctx->engine[engine->id].ringbuf = ringbuf; 2499 ce->ringbuf = ringbuf;
2559 ctx->engine[engine->id].state = ctx_obj; 2500 ce->state = ctx_obj;
2501 ce->initialised = engine->init_context == NULL;
2560 2502
2561 if (ctx != ctx->i915->kernel_context && engine->init_context) {
2562 struct drm_i915_gem_request *req;
2563
2564 req = i915_gem_request_alloc(engine, ctx);
2565 if (IS_ERR(req)) {
2566 ret = PTR_ERR(req);
2567 DRM_ERROR("ring create req: %d\n", ret);
2568 goto error_ringbuf;
2569 }
2570
2571 ret = engine->init_context(req);
2572 i915_add_request_no_flush(req);
2573 if (ret) {
2574 DRM_ERROR("ring init context: %d\n",
2575 ret);
2576 goto error_ringbuf;
2577 }
2578 }
2579 return 0; 2503 return 0;
2580 2504
2581error_ringbuf: 2505error_ringbuf:
2582 intel_ringbuffer_free(ringbuf); 2506 intel_ringbuffer_free(ringbuf);
2583error_deref_obj: 2507error_deref_obj:
2584 drm_gem_object_unreference(&ctx_obj->base); 2508 drm_gem_object_unreference(&ctx_obj->base);
2585 ctx->engine[engine->id].ringbuf = NULL; 2509 ce->ringbuf = NULL;
2586 ctx->engine[engine->id].state = NULL; 2510 ce->state = NULL;
2587 return ret; 2511 return ret;
2588} 2512}
2589 2513
2590void intel_lr_context_reset(struct drm_i915_private *dev_priv, 2514void intel_lr_context_reset(struct drm_i915_private *dev_priv,
2591 struct intel_context *ctx) 2515 struct i915_gem_context *ctx)
2592{ 2516{
2593 struct intel_engine_cs *engine; 2517 struct intel_engine_cs *engine;
2594 2518
2595 for_each_engine(engine, dev_priv) { 2519 for_each_engine(engine, dev_priv) {
2596 struct drm_i915_gem_object *ctx_obj = 2520 struct intel_context *ce = &ctx->engine[engine->id];
2597 ctx->engine[engine->id].state; 2521 struct drm_i915_gem_object *ctx_obj = ce->state;
2598 struct intel_ringbuffer *ringbuf =
2599 ctx->engine[engine->id].ringbuf;
2600 void *vaddr; 2522 void *vaddr;
2601 uint32_t *reg_state; 2523 uint32_t *reg_state;
2602 2524
@@ -2615,7 +2537,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
2615 2537
2616 i915_gem_object_unpin_map(ctx_obj); 2538 i915_gem_object_unpin_map(ctx_obj);
2617 2539
2618 ringbuf->head = 0; 2540 ce->ringbuf->head = 0;
2619 ringbuf->tail = 0; 2541 ce->ringbuf->tail = 0;
2620 } 2542 }
2621} 2543}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 60a7385bc531..a8db42a9c50f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -99,30 +99,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
99#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) 99#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
100#define LRC_STATE_PN (LRC_PPHWSP_PN + 1) 100#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
101 101
102void intel_lr_context_free(struct intel_context *ctx); 102struct i915_gem_context;
103
103uint32_t intel_lr_context_size(struct intel_engine_cs *engine); 104uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
104int intel_lr_context_deferred_alloc(struct intel_context *ctx, 105void intel_lr_context_unpin(struct i915_gem_context *ctx,
105 struct intel_engine_cs *engine);
106void intel_lr_context_unpin(struct intel_context *ctx,
107 struct intel_engine_cs *engine); 106 struct intel_engine_cs *engine);
108 107
109struct drm_i915_private; 108struct drm_i915_private;
110 109
111void intel_lr_context_reset(struct drm_i915_private *dev_priv, 110void intel_lr_context_reset(struct drm_i915_private *dev_priv,
112 struct intel_context *ctx); 111 struct i915_gem_context *ctx);
113uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 112uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
114 struct intel_engine_cs *engine); 113 struct intel_engine_cs *engine);
115 114
116u32 intel_execlists_ctx_id(struct intel_context *ctx,
117 struct intel_engine_cs *engine);
118
119/* Execlists */ 115/* Execlists */
120int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); 116int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
117 int enable_execlists);
121struct i915_execbuffer_params; 118struct i915_execbuffer_params;
122int intel_execlists_submission(struct i915_execbuffer_params *params, 119int intel_execlists_submission(struct i915_execbuffer_params *params,
123 struct drm_i915_gem_execbuffer2 *args, 120 struct drm_i915_gem_execbuffer2 *args,
124 struct list_head *vmas); 121 struct list_head *vmas);
125 122
126void intel_execlists_retire_requests(struct intel_engine_cs *engine); 123void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
127 124
128#endif /* _INTEL_LRC_H_ */ 125#endif /* _INTEL_LRC_H_ */
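Note: with this header change the execlists entry points take a drm_i915_private instead of a drm_device. A hedged caller sketch, assuming the enable_execlists module-parameter plumbing is otherwise unchanged; not verified against this tree:

	/* illustrative fragment from driver load */
	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv, i915.enable_execlists);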
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bc53c0dd34d0..e06b9036bebc 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -190,7 +190,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
190 /* Set the dithering flag on LVDS as needed; note that there is no 190 /* Set the dithering flag on LVDS as needed; note that there is no
191 * special LVDS dither control bit on pch-split platforms, so dithering is 191 * special LVDS dither control bit on pch-split platforms, so dithering is
192 * only controlled through the PIPECONF reg. */ 192 * only controlled through the PIPECONF reg. */
193 if (INTEL_INFO(dev)->gen == 4) { 193 if (IS_GEN4(dev_priv)) {
194 /* Bspec wording suggests that LVDS port dithering only exists 194 /* Bspec wording suggests that LVDS port dithering only exists
195 * for 18bpp panels. */ 195 * for 18bpp panels. */
196 if (crtc->config->dither && crtc->config->pipe_bpp == 18) 196 if (crtc->config->dither && crtc->config->pipe_bpp == 18)
@@ -547,7 +547,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
547static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 547static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
548 .get_modes = intel_lvds_get_modes, 548 .get_modes = intel_lvds_get_modes,
549 .mode_valid = intel_lvds_mode_valid, 549 .mode_valid = intel_lvds_mode_valid,
550 .best_encoder = intel_best_encoder,
551}; 550};
552 551
553static const struct drm_connector_funcs intel_lvds_connector_funcs = { 552static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -978,7 +977,7 @@ void intel_lvds_init(struct drm_device *dev)
978 DRM_MODE_CONNECTOR_LVDS); 977 DRM_MODE_CONNECTOR_LVDS);
979 978
980 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, 979 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
981 DRM_MODE_ENCODER_LVDS, NULL); 980 DRM_MODE_ENCODER_LVDS, "LVDS");
982 981
983 intel_encoder->enable = intel_enable_lvds; 982 intel_encoder->enable = intel_enable_lvds;
984 intel_encoder->pre_enable = intel_pre_enable_lvds; 983 intel_encoder->pre_enable = intel_pre_enable_lvds;
@@ -1082,6 +1081,8 @@ void intel_lvds_init(struct drm_device *dev)
1082 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); 1081 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
1083 if (fixed_mode) { 1082 if (fixed_mode) {
1084 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 1083 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1084 connector->display_info.width_mm = fixed_mode->width_mm;
1085 connector->display_info.height_mm = fixed_mode->height_mm;
1085 goto out; 1086 goto out;
1086 } 1087 }
1087 } 1088 }
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6ba4bf7f2a89..b765c75f3fcd 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -189,7 +189,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
189 */ 189 */
190int intel_mocs_init_engine(struct intel_engine_cs *engine) 190int intel_mocs_init_engine(struct intel_engine_cs *engine)
191{ 191{
192 struct drm_i915_private *dev_priv = to_i915(engine->dev); 192 struct drm_i915_private *dev_priv = engine->i915;
193 struct drm_i915_mocs_table table; 193 struct drm_i915_mocs_table table;
194 unsigned int index; 194 unsigned int index;
195 195
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 99e26034ae8d..f6d8a21d2c49 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -240,10 +240,11 @@ struct opregion_asle_ext {
240 240
241#define MAX_DSLP 1500 241#define MAX_DSLP 1500
242 242
243static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) 243static int swsci(struct drm_i915_private *dev_priv,
244 u32 function, u32 parm, u32 *parm_out)
244{ 245{
245 struct drm_i915_private *dev_priv = dev->dev_private;
246 struct opregion_swsci *swsci = dev_priv->opregion.swsci; 246 struct opregion_swsci *swsci = dev_priv->opregion.swsci;
247 struct pci_dev *pdev = dev_priv->dev->pdev;
247 u32 main_function, sub_function, scic; 248 u32 main_function, sub_function, scic;
248 u16 swsci_val; 249 u16 swsci_val;
249 u32 dslp; 250 u32 dslp;
@@ -293,16 +294,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
293 swsci->scic = scic; 294 swsci->scic = scic;
294 295
295 /* Ensure SCI event is selected and event trigger is cleared. */ 296 /* Ensure SCI event is selected and event trigger is cleared. */
296 pci_read_config_word(dev->pdev, SWSCI, &swsci_val); 297 pci_read_config_word(pdev, SWSCI, &swsci_val);
297 if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) { 298 if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
298 swsci_val |= SWSCI_SCISEL; 299 swsci_val |= SWSCI_SCISEL;
299 swsci_val &= ~SWSCI_GSSCIE; 300 swsci_val &= ~SWSCI_GSSCIE;
300 pci_write_config_word(dev->pdev, SWSCI, swsci_val); 301 pci_write_config_word(pdev, SWSCI, swsci_val);
301 } 302 }
302 303
303 /* Use event trigger to tell bios to check the mail. */ 304 /* Use event trigger to tell bios to check the mail. */
304 swsci_val |= SWSCI_GSSCIE; 305 swsci_val |= SWSCI_GSSCIE;
305 pci_write_config_word(dev->pdev, SWSCI, swsci_val); 306 pci_write_config_word(pdev, SWSCI, swsci_val);
306 307
307 /* Poll for the result. */ 308 /* Poll for the result. */
308#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) 309#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
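Note: the C macro defined above is the SWSCI completion condition: the SCIC indicator bit clearing means the BIOS has answered the mailbox. A hedged sketch of the kind of bounded poll that consumes it; the driver's actual loop and timeout helper may differ:

	/* illustrative poll; dslp is the allowed timeout in milliseconds */
	unsigned long timeout = jiffies + msecs_to_jiffies(dslp);

	while (!C) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}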
@@ -336,13 +337,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
336int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 337int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
337 bool enable) 338 bool enable)
338{ 339{
339 struct drm_device *dev = intel_encoder->base.dev; 340 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
340 u32 parm = 0; 341 u32 parm = 0;
341 u32 type = 0; 342 u32 type = 0;
342 u32 port; 343 u32 port;
343 344
344 /* don't care about old stuff for now */ 345 /* don't care about old stuff for now */
345 if (!HAS_DDI(dev)) 346 if (!HAS_DDI(dev_priv))
346 return 0; 347 return 0;
347 348
348 if (intel_encoder->type == INTEL_OUTPUT_DSI) 349 if (intel_encoder->type == INTEL_OUTPUT_DSI)
@@ -382,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
382 383
383 parm |= type << (16 + port * 3); 384 parm |= type << (16 + port * 3);
384 385
385 return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); 386 return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
386} 387}
387 388
388static const struct { 389static const struct {
@@ -396,27 +397,28 @@ static const struct {
396 { PCI_D3cold, 0x04 }, 397 { PCI_D3cold, 0x04 },
397}; 398};
398 399
399int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) 400int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
401 pci_power_t state)
400{ 402{
401 int i; 403 int i;
402 404
403 if (!HAS_DDI(dev)) 405 if (!HAS_DDI(dev_priv))
404 return 0; 406 return 0;
405 407
406 for (i = 0; i < ARRAY_SIZE(power_state_map); i++) { 408 for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
407 if (state == power_state_map[i].pci_power_state) 409 if (state == power_state_map[i].pci_power_state)
408 return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE, 410 return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
409 power_state_map[i].parm, NULL); 411 power_state_map[i].parm, NULL);
410 } 412 }
411 413
412 return -EINVAL; 414 return -EINVAL;
413} 415}
414 416
415static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 417static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
416{ 418{
417 struct drm_i915_private *dev_priv = dev->dev_private;
418 struct intel_connector *connector; 419 struct intel_connector *connector;
419 struct opregion_asle *asle = dev_priv->opregion.asle; 420 struct opregion_asle *asle = dev_priv->opregion.asle;
421 struct drm_device *dev = dev_priv->dev;
420 422
421 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 423 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
422 424
@@ -449,7 +451,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
449 return 0; 451 return 0;
450} 452}
451 453
452static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) 454static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
453{ 455{
454 /* alsi is the current ALS reading in lux. 0 indicates below sensor 456 /* alsi is the current ALS reading in lux. 0 indicates below sensor
455 range, 0xffff indicates above sensor range. 1-0xfffe are valid */ 457 range, 0xffff indicates above sensor range. 1-0xfffe are valid */
@@ -457,13 +459,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
457 return ASLC_ALS_ILLUM_FAILED; 459 return ASLC_ALS_ILLUM_FAILED;
458} 460}
459 461
460static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) 462static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
461{ 463{
462 DRM_DEBUG_DRIVER("PWM freq is not supported\n"); 464 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
463 return ASLC_PWM_FREQ_FAILED; 465 return ASLC_PWM_FREQ_FAILED;
464} 466}
465 467
466static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) 468static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
467{ 469{
468 /* Panel fitting is currently controlled by the X code, so this is a 470 /* Panel fitting is currently controlled by the X code, so this is a
469 noop until modesetting support works fully */ 471 noop until modesetting support works fully */
@@ -471,13 +473,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
471 return ASLC_PFIT_FAILED; 473 return ASLC_PFIT_FAILED;
472} 474}
473 475
474static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot) 476static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
475{ 477{
476 DRM_DEBUG_DRIVER("SROT is not supported\n"); 478 DRM_DEBUG_DRIVER("SROT is not supported\n");
477 return ASLC_ROTATION_ANGLES_FAILED; 479 return ASLC_ROTATION_ANGLES_FAILED;
478} 480}
479 481
480static u32 asle_set_button_array(struct drm_device *dev, u32 iuer) 482static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
481{ 483{
482 if (!iuer) 484 if (!iuer)
483 DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n"); 485 DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
@@ -495,7 +497,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
495 return ASLC_BUTTON_ARRAY_FAILED; 497 return ASLC_BUTTON_ARRAY_FAILED;
496} 498}
497 499
498static u32 asle_set_convertible(struct drm_device *dev, u32 iuer) 500static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
499{ 501{
500 if (iuer & ASLE_IUER_CONVERTIBLE) 502 if (iuer & ASLE_IUER_CONVERTIBLE)
501 DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n"); 503 DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
@@ -505,7 +507,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
505 return ASLC_CONVERTIBLE_FAILED; 507 return ASLC_CONVERTIBLE_FAILED;
506} 508}
507 509
508static u32 asle_set_docking(struct drm_device *dev, u32 iuer) 510static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
509{ 511{
510 if (iuer & ASLE_IUER_DOCKING) 512 if (iuer & ASLE_IUER_DOCKING)
511 DRM_DEBUG_DRIVER("Docking is not supported (docked)\n"); 513 DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
@@ -515,7 +517,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
515 return ASLC_DOCKING_FAILED; 517 return ASLC_DOCKING_FAILED;
516} 518}
517 519
518static u32 asle_isct_state(struct drm_device *dev) 520static u32 asle_isct_state(struct drm_i915_private *dev_priv)
519{ 521{
520 DRM_DEBUG_DRIVER("ISCT is not supported\n"); 522 DRM_DEBUG_DRIVER("ISCT is not supported\n");
521 return ASLC_ISCT_STATE_FAILED; 523 return ASLC_ISCT_STATE_FAILED;
@@ -527,7 +529,6 @@ static void asle_work(struct work_struct *work)
527 container_of(work, struct intel_opregion, asle_work); 529 container_of(work, struct intel_opregion, asle_work);
528 struct drm_i915_private *dev_priv = 530 struct drm_i915_private *dev_priv =
529 container_of(opregion, struct drm_i915_private, opregion); 531 container_of(opregion, struct drm_i915_private, opregion);
530 struct drm_device *dev = dev_priv->dev;
531 struct opregion_asle *asle = dev_priv->opregion.asle; 532 struct opregion_asle *asle = dev_priv->opregion.asle;
532 u32 aslc_stat = 0; 533 u32 aslc_stat = 0;
533 u32 aslc_req; 534 u32 aslc_req;
@@ -544,40 +545,38 @@ static void asle_work(struct work_struct *work)
544 } 545 }
545 546
546 if (aslc_req & ASLC_SET_ALS_ILLUM) 547 if (aslc_req & ASLC_SET_ALS_ILLUM)
547 aslc_stat |= asle_set_als_illum(dev, asle->alsi); 548 aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
548 549
549 if (aslc_req & ASLC_SET_BACKLIGHT) 550 if (aslc_req & ASLC_SET_BACKLIGHT)
550 aslc_stat |= asle_set_backlight(dev, asle->bclp); 551 aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
551 552
552 if (aslc_req & ASLC_SET_PFIT) 553 if (aslc_req & ASLC_SET_PFIT)
553 aslc_stat |= asle_set_pfit(dev, asle->pfit); 554 aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
554 555
555 if (aslc_req & ASLC_SET_PWM_FREQ) 556 if (aslc_req & ASLC_SET_PWM_FREQ)
556 aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb); 557 aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
557 558
558 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) 559 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
559 aslc_stat |= asle_set_supported_rotation_angles(dev, 560 aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
560 asle->srot); 561 asle->srot);
561 562
562 if (aslc_req & ASLC_BUTTON_ARRAY) 563 if (aslc_req & ASLC_BUTTON_ARRAY)
563 aslc_stat |= asle_set_button_array(dev, asle->iuer); 564 aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
564 565
565 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR) 566 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
566 aslc_stat |= asle_set_convertible(dev, asle->iuer); 567 aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
567 568
568 if (aslc_req & ASLC_DOCKING_INDICATOR) 569 if (aslc_req & ASLC_DOCKING_INDICATOR)
569 aslc_stat |= asle_set_docking(dev, asle->iuer); 570 aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
570 571
571 if (aslc_req & ASLC_ISCT_STATE_CHANGE) 572 if (aslc_req & ASLC_ISCT_STATE_CHANGE)
572 aslc_stat |= asle_isct_state(dev); 573 aslc_stat |= asle_isct_state(dev_priv);
573 574
574 asle->aslc = aslc_stat; 575 asle->aslc = aslc_stat;
575} 576}
576 577
577void intel_opregion_asle_intr(struct drm_device *dev) 578void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
578{ 579{
579 struct drm_i915_private *dev_priv = dev->dev_private;
580
581 if (dev_priv->opregion.asle) 580 if (dev_priv->opregion.asle)
582 schedule_work(&dev_priv->opregion.asle_work); 581 schedule_work(&dev_priv->opregion.asle_work);
583} 582}
@@ -658,10 +657,10 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
658 } 657 }
659} 658}
660 659
661static void intel_didl_outputs(struct drm_device *dev) 660static void intel_didl_outputs(struct drm_i915_private *dev_priv)
662{ 661{
663 struct drm_i915_private *dev_priv = dev->dev_private;
664 struct intel_opregion *opregion = &dev_priv->opregion; 662 struct intel_opregion *opregion = &dev_priv->opregion;
663 struct pci_dev *pdev = dev_priv->dev->pdev;
665 struct drm_connector *connector; 664 struct drm_connector *connector;
666 acpi_handle handle; 665 acpi_handle handle;
667 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; 666 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -670,7 +669,7 @@ static void intel_didl_outputs(struct drm_device *dev)
670 u32 temp, max_outputs; 669 u32 temp, max_outputs;
671 int i = 0; 670 int i = 0;
672 671
673 handle = ACPI_HANDLE(&dev->pdev->dev); 672 handle = ACPI_HANDLE(&pdev->dev);
674 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) 673 if (!handle || acpi_bus_get_device(handle, &acpi_dev))
675 return; 674 return;
676 675
@@ -725,7 +724,7 @@ end:
725 724
726blind_set: 725blind_set:
727 i = 0; 726 i = 0;
728 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 727 list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) {
729 int output_type = ACPI_OTHER_OUTPUT; 728 int output_type = ACPI_OTHER_OUTPUT;
730 if (i >= max_outputs) { 729 if (i >= max_outputs) {
731 DRM_DEBUG_KMS("More than %u outputs in connector list\n", 730 DRM_DEBUG_KMS("More than %u outputs in connector list\n",
@@ -761,9 +760,8 @@ blind_set:
761 goto end; 760 goto end;
762} 761}
763 762
764static void intel_setup_cadls(struct drm_device *dev) 763static void intel_setup_cadls(struct drm_i915_private *dev_priv)
765{ 764{
766 struct drm_i915_private *dev_priv = dev->dev_private;
767 struct intel_opregion *opregion = &dev_priv->opregion; 765 struct intel_opregion *opregion = &dev_priv->opregion;
768 int i = 0; 766 int i = 0;
769 u32 disp_id; 767 u32 disp_id;
@@ -780,17 +778,16 @@ static void intel_setup_cadls(struct drm_device *dev)
780 } while (++i < 8 && disp_id != 0); 778 } while (++i < 8 && disp_id != 0);
781} 779}
782 780
783void intel_opregion_init(struct drm_device *dev) 781void intel_opregion_register(struct drm_i915_private *dev_priv)
784{ 782{
785 struct drm_i915_private *dev_priv = dev->dev_private;
786 struct intel_opregion *opregion = &dev_priv->opregion; 783 struct intel_opregion *opregion = &dev_priv->opregion;
787 784
788 if (!opregion->header) 785 if (!opregion->header)
789 return; 786 return;
790 787
791 if (opregion->acpi) { 788 if (opregion->acpi) {
792 intel_didl_outputs(dev); 789 intel_didl_outputs(dev_priv);
793 intel_setup_cadls(dev); 790 intel_setup_cadls(dev_priv);
794 791
795 /* Notify BIOS we are ready to handle ACPI video ext notifs. 792 /* Notify BIOS we are ready to handle ACPI video ext notifs.
796 * Right now, all the events are handled by the ACPI video module. 793 * Right now, all the events are handled by the ACPI video module.
@@ -808,9 +805,8 @@ void intel_opregion_init(struct drm_device *dev)
808 } 805 }
809} 806}
810 807
811void intel_opregion_fini(struct drm_device *dev) 808void intel_opregion_unregister(struct drm_i915_private *dev_priv)
812{ 809{
813 struct drm_i915_private *dev_priv = dev->dev_private;
814 struct intel_opregion *opregion = &dev_priv->opregion; 810 struct intel_opregion *opregion = &dev_priv->opregion;
815 811
816 if (!opregion->header) 812 if (!opregion->header)
@@ -842,9 +838,8 @@ void intel_opregion_fini(struct drm_device *dev)
842 opregion->lid_state = NULL; 838 opregion->lid_state = NULL;
843} 839}
844 840
845static void swsci_setup(struct drm_device *dev) 841static void swsci_setup(struct drm_i915_private *dev_priv)
846{ 842{
847 struct drm_i915_private *dev_priv = dev->dev_private;
848 struct intel_opregion *opregion = &dev_priv->opregion; 843 struct intel_opregion *opregion = &dev_priv->opregion;
849 bool requested_callbacks = false; 844 bool requested_callbacks = false;
850 u32 tmp; 845 u32 tmp;
@@ -854,7 +849,7 @@ static void swsci_setup(struct drm_device *dev)
854 opregion->swsci_sbcb_sub_functions = 1; 849 opregion->swsci_sbcb_sub_functions = 1;
855 850
856 /* We use GBDA to ask for supported GBDA calls. */ 851 /* We use GBDA to ask for supported GBDA calls. */
857 if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { 852 if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
858 /* make the bits match the sub-function codes */ 853 /* make the bits match the sub-function codes */
859 tmp <<= 1; 854 tmp <<= 1;
860 opregion->swsci_gbda_sub_functions |= tmp; 855 opregion->swsci_gbda_sub_functions |= tmp;
@@ -865,7 +860,7 @@ static void swsci_setup(struct drm_device *dev)
865 * must not call interfaces that are not specifically requested by the 860 * must not call interfaces that are not specifically requested by the
866 * bios. 861 * bios.
867 */ 862 */
868 if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { 863 if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
869 /* here, the bits already match sub-function codes */ 864 /* here, the bits already match sub-function codes */
870 opregion->swsci_sbcb_sub_functions |= tmp; 865 opregion->swsci_sbcb_sub_functions |= tmp;
871 requested_callbacks = true; 866 requested_callbacks = true;
@@ -876,7 +871,7 @@ static void swsci_setup(struct drm_device *dev)
876 * the callback is _requested_. But we still can't call interfaces that 871 * the callback is _requested_. But we still can't call interfaces that
877 * are not requested. 872 * are not requested.
878 */ 873 */
879 if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { 874 if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
880 /* make the bits match the sub-function codes */ 875 /* make the bits match the sub-function codes */
881 u32 low = tmp & 0x7ff; 876 u32 low = tmp & 0x7ff;
882 u32 high = tmp & ~0xfff; /* bit 11 is reserved */ 877 u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -918,10 +913,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
918 { } 913 { }
919}; 914};
920 915
921int intel_opregion_setup(struct drm_device *dev) 916int intel_opregion_setup(struct drm_i915_private *dev_priv)
922{ 917{
923 struct drm_i915_private *dev_priv = dev->dev_private;
924 struct intel_opregion *opregion = &dev_priv->opregion; 918 struct intel_opregion *opregion = &dev_priv->opregion;
919 struct pci_dev *pdev = dev_priv->dev->pdev;
925 u32 asls, mboxes; 920 u32 asls, mboxes;
926 char buf[sizeof(OPREGION_SIGNATURE)]; 921 char buf[sizeof(OPREGION_SIGNATURE)];
927 int err = 0; 922 int err = 0;
@@ -933,7 +928,7 @@ int intel_opregion_setup(struct drm_device *dev)
933 BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); 928 BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
934 BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); 929 BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
935 930
936 pci_read_config_dword(dev->pdev, ASLS, &asls); 931 pci_read_config_dword(pdev, ASLS, &asls);
937 DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); 932 DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
938 if (asls == 0) { 933 if (asls == 0) {
939 DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); 934 DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
@@ -965,7 +960,7 @@ int intel_opregion_setup(struct drm_device *dev)
965 if (mboxes & MBOX_SWSCI) { 960 if (mboxes & MBOX_SWSCI) {
966 DRM_DEBUG_DRIVER("SWSCI supported\n"); 961 DRM_DEBUG_DRIVER("SWSCI supported\n");
967 opregion->swsci = base + OPREGION_SWSCI_OFFSET; 962 opregion->swsci = base + OPREGION_SWSCI_OFFSET;
968 swsci_setup(dev); 963 swsci_setup(dev_priv);
969 } 964 }
970 965
971 if (mboxes & MBOX_ASLE) { 966 if (mboxes & MBOX_ASLE) {
@@ -1014,12 +1009,12 @@ err_out:
1014} 1009}
1015 1010
1016int 1011int
1017intel_opregion_get_panel_type(struct drm_device *dev) 1012intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
1018{ 1013{
1019 u32 panel_details; 1014 u32 panel_details;
1020 int ret; 1015 int ret;
1021 1016
1022 ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); 1017 ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
1023 if (ret) { 1018 if (ret) {
1024 DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n", 1019 DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
1025 ret); 1020 ret);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index bd38e49f7334..eb93f90bb74d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -168,7 +168,7 @@ struct overlay_registers {
168}; 168};
169 169
170struct intel_overlay { 170struct intel_overlay {
171 struct drm_device *dev; 171 struct drm_i915_private *i915;
172 struct intel_crtc *crtc; 172 struct intel_crtc *crtc;
173 struct drm_i915_gem_object *vid_bo; 173 struct drm_i915_gem_object *vid_bo;
174 struct drm_i915_gem_object *old_vid_bo; 174 struct drm_i915_gem_object *old_vid_bo;
@@ -190,15 +190,15 @@ struct intel_overlay {
190static struct overlay_registers __iomem * 190static struct overlay_registers __iomem *
191intel_overlay_map_regs(struct intel_overlay *overlay) 191intel_overlay_map_regs(struct intel_overlay *overlay)
192{ 192{
193 struct drm_i915_private *dev_priv = to_i915(overlay->dev); 193 struct drm_i915_private *dev_priv = overlay->i915;
194 struct i915_ggtt *ggtt = &dev_priv->ggtt;
195 struct overlay_registers __iomem *regs; 194 struct overlay_registers __iomem *regs;
196 195
197 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 196 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
198 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; 197 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
199 else 198 else
200 regs = io_mapping_map_wc(ggtt->mappable, 199 regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
201 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 200 overlay->flip_addr,
201 PAGE_SIZE);
202 202
203 return regs; 203 return regs;
204} 204}
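Note: intel_overlay_map_regs() now maps PAGE_SIZE of the GGTT aperture at overlay->flip_addr via the updated io_mapping_map_wc() signature, or returns the physical-handle vaddr on OVERLAY_NEEDS_PHYSICAL platforms. A usage sketch of the pairing with intel_overlay_unmap_regs() from the next hunk; error handling elided:

	/* illustrative pairing */
	struct overlay_registers __iomem *regs;

	regs = intel_overlay_map_regs(overlay);
	if (regs) {
		/* program OCMD and friends through regs ... */
		intel_overlay_unmap_regs(overlay, regs);
	}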
@@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
206static void intel_overlay_unmap_regs(struct intel_overlay *overlay, 206static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
207 struct overlay_registers __iomem *regs) 207 struct overlay_registers __iomem *regs)
208{ 208{
209 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 209 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
210 io_mapping_unmap(regs); 210 io_mapping_unmap(regs);
211} 211}
212 212
@@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
232/* overlay needs to be disabled in OCMD reg */ 232
233static int intel_overlay_on(struct intel_overlay *overlay) 233static int intel_overlay_on(struct intel_overlay *overlay)
234{ 234{
235 struct drm_device *dev = overlay->dev; 235 struct drm_i915_private *dev_priv = overlay->i915;
236 struct drm_i915_private *dev_priv = dev->dev_private;
237 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 236 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
238 struct drm_i915_gem_request *req; 237 struct drm_i915_gem_request *req;
239 int ret; 238 int ret;
240 239
241 WARN_ON(overlay->active); 240 WARN_ON(overlay->active);
242 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); 241 WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
243 242
244 req = i915_gem_request_alloc(engine, NULL); 243 req = i915_gem_request_alloc(engine, NULL);
245 if (IS_ERR(req)) 244 if (IS_ERR(req))
@@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
266static int intel_overlay_continue(struct intel_overlay *overlay, 265static int intel_overlay_continue(struct intel_overlay *overlay,
267 bool load_polyphase_filter) 266 bool load_polyphase_filter)
268{ 267{
269 struct drm_device *dev = overlay->dev; 268 struct drm_i915_private *dev_priv = overlay->i915;
270 struct drm_i915_private *dev_priv = dev->dev_private;
271 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 269 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
272 struct drm_i915_gem_request *req; 270 struct drm_i915_gem_request *req;
273 u32 flip_addr = overlay->flip_addr; 271 u32 flip_addr = overlay->flip_addr;
@@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
335/* overlay needs to be disabled in OCMD reg */ 333/* overlay needs to be disabled in OCMD reg */
336static int intel_overlay_off(struct intel_overlay *overlay) 334static int intel_overlay_off(struct intel_overlay *overlay)
337{ 335{
338 struct drm_device *dev = overlay->dev; 336 struct drm_i915_private *dev_priv = overlay->i915;
339 struct drm_i915_private *dev_priv = dev->dev_private;
340 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 337 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
341 struct drm_i915_gem_request *req; 338 struct drm_i915_gem_request *req;
342 u32 flip_addr = overlay->flip_addr; 339 u32 flip_addr = overlay->flip_addr;
@@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
365 intel_ring_emit(engine, flip_addr); 362 intel_ring_emit(engine, flip_addr);
366 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 363 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
367 /* turn overlay off */ 364 /* turn overlay off */
368 if (IS_I830(dev)) { 365 if (IS_I830(dev_priv)) {
369 /* Workaround: Don't disable the overlay fully, since otherwise 366 /* Workaround: Don't disable the overlay fully, since otherwise
370 * it dies on the next OVERLAY_ON cmd. */ 367 * it dies on the next OVERLAY_ON cmd. */
371 intel_ring_emit(engine, MI_NOOP); 368 intel_ring_emit(engine, MI_NOOP);
@@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
408 */ 405 */
409static int intel_overlay_release_old_vid(struct intel_overlay *overlay) 406static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
410{ 407{
411 struct drm_device *dev = overlay->dev; 408 struct drm_i915_private *dev_priv = overlay->i915;
412 struct drm_i915_private *dev_priv = dev->dev_private;
413 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 409 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
414 int ret; 410 int ret;
415 411
416 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 412 lockdep_assert_held(&dev_priv->dev->struct_mutex);
417 413
418 /* Only wait if there is actually an old frame to release to 414 /* Only wait if there is actually an old frame to release to
419 * guarantee forward progress. 415 * guarantee forward progress.
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format)
537 } 533 }
538} 534}
539 535
540static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) 536static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
541{ 537{
542 u32 mask, shift, ret; 538 u32 mask, shift, ret;
543 if (IS_GEN2(dev)) { 539 if (IS_GEN2(dev_priv)) {
544 mask = 0x1f; 540 mask = 0x1f;
545 shift = 5; 541 shift = 5;
546 } else { 542 } else {
@@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
548 shift = 6; 544 shift = 6;
549 } 545 }
550 ret = ((offset + width + mask) >> shift) - (offset >> shift); 546 ret = ((offset + width + mask) >> shift) - (offset >> shift);
551 if (!IS_GEN2(dev)) 547 if (!IS_GEN2(dev_priv))
552 ret <<= 1; 548 ret <<= 1;
553 ret -= 1; 549 ret -= 1;
554 return ret << 2; 550 return ret << 2;
@@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
741 int ret, tmp_width; 737 int ret, tmp_width;
742 struct overlay_registers __iomem *regs; 738 struct overlay_registers __iomem *regs;
743 bool scale_changed = false; 739 bool scale_changed = false;
744 struct drm_device *dev = overlay->dev; 740 struct drm_i915_private *dev_priv = overlay->i915;
745 u32 swidth, swidthsw, sheight, ostride; 741 u32 swidth, swidthsw, sheight, ostride;
746 enum pipe pipe = overlay->crtc->pipe; 742 enum pipe pipe = overlay->crtc->pipe;
747 743
748 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 744 lockdep_assert_held(&dev_priv->dev->struct_mutex);
749 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 745 WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
750 746
751 ret = intel_overlay_release_old_vid(overlay); 747 ret = intel_overlay_release_old_vid(overlay);
752 if (ret != 0) 748 if (ret != 0)
@@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
769 goto out_unpin; 765 goto out_unpin;
770 } 766 }
771 oconfig = OCONF_CC_OUT_8BIT; 767 oconfig = OCONF_CC_OUT_8BIT;
772 if (IS_GEN4(overlay->dev)) 768 if (IS_GEN4(dev_priv))
773 oconfig |= OCONF_CSC_MODE_BT709; 769 oconfig |= OCONF_CSC_MODE_BT709;
774 oconfig |= pipe == 0 ? 770 oconfig |= pipe == 0 ?
775 OCONF_PIPE_A : OCONF_PIPE_B; 771 OCONF_PIPE_A : OCONF_PIPE_B;
@@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
796 tmp_width = params->src_w; 792 tmp_width = params->src_w;
797 793
798 swidth = params->src_w; 794 swidth = params->src_w;
799 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); 795 swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
800 sheight = params->src_h; 796 sheight = params->src_h;
801 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y); 797 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
802 ostride = params->stride_Y; 798 ostride = params->stride_Y;
@@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
806 int uv_vscale = uv_vsubsampling(params->format); 802 int uv_vscale = uv_vsubsampling(params->format);
807 u32 tmp_U, tmp_V; 803 u32 tmp_U, tmp_V;
808 swidth |= (params->src_w/uv_hscale) << 16; 804 swidth |= (params->src_w/uv_hscale) << 16;
809 tmp_U = calc_swidthsw(overlay->dev, params->offset_U, 805 tmp_U = calc_swidthsw(dev_priv, params->offset_U,
810 params->src_w/uv_hscale); 806 params->src_w/uv_hscale);
811 tmp_V = calc_swidthsw(overlay->dev, params->offset_V, 807 tmp_V = calc_swidthsw(dev_priv, params->offset_V,
812 params->src_w/uv_hscale); 808 params->src_w/uv_hscale);
813 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; 809 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
814 sheight |= (params->src_h/uv_vscale) << 16; 810 sheight |= (params->src_h/uv_vscale) << 16;
@@ -840,8 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
840 overlay->old_vid_bo = overlay->vid_bo; 836 overlay->old_vid_bo = overlay->vid_bo;
841 overlay->vid_bo = new_bo; 837 overlay->vid_bo = new_bo;
842 838
843 intel_frontbuffer_flip(dev, 839 intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
844 INTEL_FRONTBUFFER_OVERLAY(pipe));
845 840
846 return 0; 841 return 0;
847 842
@@ -852,12 +847,12 @@ out_unpin:
852 847
853int intel_overlay_switch_off(struct intel_overlay *overlay) 848int intel_overlay_switch_off(struct intel_overlay *overlay)
854{ 849{
850 struct drm_i915_private *dev_priv = overlay->i915;
855 struct overlay_registers __iomem *regs; 851 struct overlay_registers __iomem *regs;
856 struct drm_device *dev = overlay->dev;
857 int ret; 852 int ret;
858 853
859 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 854 lockdep_assert_held(&dev_priv->dev->struct_mutex);
860 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 855 WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
861 856
862 ret = intel_overlay_recover_from_interrupt(overlay); 857 ret = intel_overlay_recover_from_interrupt(overlay);
863 if (ret != 0) 858 if (ret != 0)
@@ -897,15 +892,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
897 892
898static void update_pfit_vscale_ratio(struct intel_overlay *overlay) 893static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
899{ 894{
900 struct drm_device *dev = overlay->dev; 895 struct drm_i915_private *dev_priv = overlay->i915;
901 struct drm_i915_private *dev_priv = dev->dev_private;
902 u32 pfit_control = I915_READ(PFIT_CONTROL); 896 u32 pfit_control = I915_READ(PFIT_CONTROL);
903 u32 ratio; 897 u32 ratio;
904 898
905 /* XXX: This is not the same logic as in the xorg driver, but more in 899 /* XXX: This is not the same logic as in the xorg driver, but more in
906 * line with the intel documentation for the i965 900 * line with the intel documentation for the i965
907 */ 901 */
908 if (INTEL_INFO(dev)->gen >= 4) { 902 if (INTEL_GEN(dev_priv) >= 4) {
909 /* on i965 use the PGM reg to read out the autoscaler values */ 903 /* on i965 use the PGM reg to read out the autoscaler values */
910 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; 904 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
911 } else { 905 } else {
@@ -948,7 +942,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
948 return 0; 942 return 0;
949} 943}
950 944
951static int check_overlay_src(struct drm_device *dev, 945static int check_overlay_src(struct drm_i915_private *dev_priv,
952 struct drm_intel_overlay_put_image *rec, 946 struct drm_intel_overlay_put_image *rec,
953 struct drm_i915_gem_object *new_bo) 947 struct drm_i915_gem_object *new_bo)
954{ 948{
@@ -959,7 +953,7 @@ static int check_overlay_src(struct drm_device *dev,
959 u32 tmp; 953 u32 tmp;
960 954
961 /* check src dimensions */ 955 /* check src dimensions */
962 if (IS_845G(dev) || IS_I830(dev)) { 956 if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
963 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || 957 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
964 rec->src_width > IMAGE_MAX_WIDTH_LEGACY) 958 rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
965 return -EINVAL; 959 return -EINVAL;
@@ -1011,14 +1005,14 @@ static int check_overlay_src(struct drm_device *dev,
1011 return -EINVAL; 1005 return -EINVAL;
1012 1006
1013 /* stride checking */ 1007 /* stride checking */
1014 if (IS_I830(dev) || IS_845G(dev)) 1008 if (IS_I830(dev_priv) || IS_845G(dev_priv))
1015 stride_mask = 255; 1009 stride_mask = 255;
1016 else 1010 else
1017 stride_mask = 63; 1011 stride_mask = 63;
1018 1012
1019 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) 1013 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
1020 return -EINVAL; 1014 return -EINVAL;
1021 if (IS_GEN4(dev) && rec->stride_Y < 512) 1015 if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
1022 return -EINVAL; 1016 return -EINVAL;
1023 1017
1024 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 1018 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1063,13 +1057,13 @@ static int check_overlay_src(struct drm_device *dev,
1063 * Return the pipe currently connected to the panel fitter, 1057 * Return the pipe currently connected to the panel fitter,
1064 * or -1 if the panel fitter is not present or not in use 1058 * or -1 if the panel fitter is not present or not in use
1065 */ 1059 */
1066static int intel_panel_fitter_pipe(struct drm_device *dev) 1060static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
1067{ 1061{
1068 struct drm_i915_private *dev_priv = dev->dev_private;
1069 u32 pfit_control; 1062 u32 pfit_control;
1070 1063
1071 /* i830 doesn't have a panel fitter */ 1064 /* i830 doesn't have a panel fitter */
1072 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) 1065 if (INTEL_GEN(dev_priv) <= 3 &&
1066 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
1073 return -1; 1067 return -1;
1074 1068
1075 pfit_control = I915_READ(PFIT_CONTROL); 1069 pfit_control = I915_READ(PFIT_CONTROL);
@@ -1079,15 +1073,15 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
1079 return -1; 1073 return -1;
1080 1074
1081 /* 965 can place panel fitter on either pipe */ 1075 /* 965 can place panel fitter on either pipe */
1082 if (IS_GEN4(dev)) 1076 if (IS_GEN4(dev_priv))
1083 return (pfit_control >> 29) & 0x3; 1077 return (pfit_control >> 29) & 0x3;
1084 1078
1085 /* older chips can only use pipe 1 */ 1079 /* older chips can only use pipe 1 */
1086 return 1; 1080 return 1;
1087} 1081}
1088 1082
1089int intel_overlay_put_image(struct drm_device *dev, void *data, 1083int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1090 struct drm_file *file_priv) 1084 struct drm_file *file_priv)
1091{ 1085{
1092 struct drm_intel_overlay_put_image *put_image_rec = data; 1086 struct drm_intel_overlay_put_image *put_image_rec = data;
1093 struct drm_i915_private *dev_priv = dev->dev_private; 1087 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1162,7 +1156,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1162 1156
1163 /* line too wide, i.e. one-line-mode */ 1157 /* line too wide, i.e. one-line-mode */
1164 if (mode->hdisplay > 1024 && 1158 if (mode->hdisplay > 1024 &&
1165 intel_panel_fitter_pipe(dev) == crtc->pipe) { 1159 intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
1166 overlay->pfit_active = true; 1160 overlay->pfit_active = true;
1167 update_pfit_vscale_ratio(overlay); 1161 update_pfit_vscale_ratio(overlay);
1168 } else 1162 } else
@@ -1196,7 +1190,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1196 goto out_unlock; 1190 goto out_unlock;
1197 } 1191 }
1198 1192
1199 ret = check_overlay_src(dev, put_image_rec, new_bo); 1193 ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
1200 if (ret != 0) 1194 if (ret != 0)
1201 goto out_unlock; 1195 goto out_unlock;
1202 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; 1196 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@@ -1284,8 +1278,8 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1284 return 0; 1278 return 0;
1285} 1279}
1286 1280
1287int intel_overlay_attrs(struct drm_device *dev, void *data, 1281int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1288 struct drm_file *file_priv) 1282 struct drm_file *file_priv)
1289{ 1283{
1290 struct drm_intel_overlay_attrs *attrs = data; 1284 struct drm_intel_overlay_attrs *attrs = data;
1291 struct drm_i915_private *dev_priv = dev->dev_private; 1285 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,7 +1303,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1309 attrs->contrast = overlay->contrast; 1303 attrs->contrast = overlay->contrast;
1310 attrs->saturation = overlay->saturation; 1304 attrs->saturation = overlay->saturation;
1311 1305
1312 if (!IS_GEN2(dev)) { 1306 if (!IS_GEN2(dev_priv)) {
1313 attrs->gamma0 = I915_READ(OGAMC0); 1307 attrs->gamma0 = I915_READ(OGAMC0);
1314 attrs->gamma1 = I915_READ(OGAMC1); 1308 attrs->gamma1 = I915_READ(OGAMC1);
1315 attrs->gamma2 = I915_READ(OGAMC2); 1309 attrs->gamma2 = I915_READ(OGAMC2);
@@ -1341,7 +1335,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1341 intel_overlay_unmap_regs(overlay, regs); 1335 intel_overlay_unmap_regs(overlay, regs);
1342 1336
1343 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1337 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1344 if (IS_GEN2(dev)) 1338 if (IS_GEN2(dev_priv))
1345 goto out_unlock; 1339 goto out_unlock;
1346 1340
1347 if (overlay->active) { 1341 if (overlay->active) {
@@ -1371,37 +1365,36 @@ out_unlock:
1371 return ret; 1365 return ret;
1372} 1366}
1373 1367
1374void intel_setup_overlay(struct drm_device *dev) 1368void intel_setup_overlay(struct drm_i915_private *dev_priv)
1375{ 1369{
1376 struct drm_i915_private *dev_priv = dev->dev_private;
1377 struct intel_overlay *overlay; 1370 struct intel_overlay *overlay;
1378 struct drm_i915_gem_object *reg_bo; 1371 struct drm_i915_gem_object *reg_bo;
1379 struct overlay_registers __iomem *regs; 1372 struct overlay_registers __iomem *regs;
1380 int ret; 1373 int ret;
1381 1374
1382 if (!HAS_OVERLAY(dev)) 1375 if (!HAS_OVERLAY(dev_priv))
1383 return; 1376 return;
1384 1377
1385 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); 1378 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
1386 if (!overlay) 1379 if (!overlay)
1387 return; 1380 return;
1388 1381
1389 mutex_lock(&dev->struct_mutex); 1382 mutex_lock(&dev_priv->dev->struct_mutex);
1390 if (WARN_ON(dev_priv->overlay)) 1383 if (WARN_ON(dev_priv->overlay))
1391 goto out_free; 1384 goto out_free;
1392 1385
1393 overlay->dev = dev; 1386 overlay->i915 = dev_priv;
1394 1387
1395 reg_bo = NULL; 1388 reg_bo = NULL;
1396 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1389 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1397 reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); 1390 reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
1398 if (reg_bo == NULL)
1399 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
1400 if (reg_bo == NULL) 1391 if (reg_bo == NULL)
1392 reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
1393 if (IS_ERR(reg_bo))
1401 goto out_free; 1394 goto out_free;
1402 overlay->reg_bo = reg_bo; 1395 overlay->reg_bo = reg_bo;
1403 1396
1404 if (OVERLAY_NEEDS_PHYSICAL(dev)) { 1397 if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
1405 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); 1398 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1406 if (ret) { 1399 if (ret) {
1407 DRM_ERROR("failed to attach phys overlay regs\n"); 1400 DRM_ERROR("failed to attach phys overlay regs\n");
@@ -1441,25 +1434,23 @@ void intel_setup_overlay(struct drm_device *dev)
1441 intel_overlay_unmap_regs(overlay, regs); 1434 intel_overlay_unmap_regs(overlay, regs);
1442 1435
1443 dev_priv->overlay = overlay; 1436 dev_priv->overlay = overlay;
1444 mutex_unlock(&dev->struct_mutex); 1437 mutex_unlock(&dev_priv->dev->struct_mutex);
1445 DRM_INFO("initialized overlay support\n"); 1438 DRM_INFO("initialized overlay support\n");
1446 return; 1439 return;
1447 1440
1448out_unpin_bo: 1441out_unpin_bo:
1449 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1442 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1450 i915_gem_object_ggtt_unpin(reg_bo); 1443 i915_gem_object_ggtt_unpin(reg_bo);
1451out_free_bo: 1444out_free_bo:
1452 drm_gem_object_unreference(&reg_bo->base); 1445 drm_gem_object_unreference(&reg_bo->base);
1453out_free: 1446out_free:
1454 mutex_unlock(&dev->struct_mutex); 1447 mutex_unlock(&dev_priv->dev->struct_mutex);
1455 kfree(overlay); 1448 kfree(overlay);
1456 return; 1449 return;
1457} 1450}
1458 1451
1459void intel_cleanup_overlay(struct drm_device *dev) 1452void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
1460{ 1453{
1461 struct drm_i915_private *dev_priv = dev->dev_private;
1462
1463 if (!dev_priv->overlay) 1454 if (!dev_priv->overlay)
1464 return; 1455 return;
1465 1456
@@ -1482,18 +1473,17 @@ struct intel_overlay_error_state {
1482static struct overlay_registers __iomem * 1473static struct overlay_registers __iomem *
1483intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 1474intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1484{ 1475{
1485 struct drm_i915_private *dev_priv = to_i915(overlay->dev); 1476 struct drm_i915_private *dev_priv = overlay->i915;
1486 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1487 struct overlay_registers __iomem *regs; 1477 struct overlay_registers __iomem *regs;
1488 1478
1489 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1479 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
1490 /* Cast to make sparse happy, but it's wc memory anyway, so 1480 /* Cast to make sparse happy, but it's wc memory anyway, so
1491 * equivalent to the wc io mapping on X86. */ 1481 * equivalent to the wc io mapping on X86. */
1492 regs = (struct overlay_registers __iomem *) 1482 regs = (struct overlay_registers __iomem *)
1493 overlay->reg_bo->phys_handle->vaddr; 1483 overlay->reg_bo->phys_handle->vaddr;
1494 else 1484 else
1495 regs = io_mapping_map_atomic_wc(ggtt->mappable, 1485 regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
1496 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 1486 overlay->flip_addr);
1497 1487
1498 return regs; 1488 return regs;
1499} 1489}
@@ -1501,15 +1491,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1501static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, 1491static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1502 struct overlay_registers __iomem *regs) 1492 struct overlay_registers __iomem *regs)
1503{ 1493{
1504 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1494 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
1505 io_mapping_unmap_atomic(regs); 1495 io_mapping_unmap_atomic(regs);
1506} 1496}
1507 1497
1508
1509struct intel_overlay_error_state * 1498struct intel_overlay_error_state *
1510intel_overlay_capture_error_state(struct drm_device *dev) 1499intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1511{ 1500{
1512 struct drm_i915_private *dev_priv = dev->dev_private;
1513 struct intel_overlay *overlay = dev_priv->overlay; 1501 struct intel_overlay *overlay = dev_priv->overlay;
1514 struct intel_overlay_error_state *error; 1502 struct intel_overlay_error_state *error;
1515 struct overlay_registers __iomem *regs; 1503 struct overlay_registers __iomem *regs;
@@ -1523,10 +1511,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1523 1511
1524 error->dovsta = I915_READ(DOVSTA); 1512 error->dovsta = I915_READ(DOVSTA);
1525 error->isr = I915_READ(ISR); 1513 error->isr = I915_READ(ISR);
1526 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1514 error->base = overlay->flip_addr;
1527 error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
1528 else
1529 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1530 1515
1531 regs = intel_overlay_map_regs_atomic(overlay); 1516 regs = intel_overlay_map_regs_atomic(overlay);
1532 if (!regs) 1517 if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 8357d571553a..f0b1602c3258 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1724,6 +1724,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
1724 container_of(panel, struct intel_connector, panel); 1724 container_of(panel, struct intel_connector, panel);
1725 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1725 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1726 1726
1727 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
1728 intel_dp_aux_init_backlight_funcs(connector) == 0)
1729 return;
1730
1731 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
1732 intel_dsi_dcs_init_backlight_funcs(connector) == 0)
1733 return;
1734
1727 if (IS_BROXTON(dev_priv)) { 1735 if (IS_BROXTON(dev_priv)) {
1728 panel->backlight.setup = bxt_setup_backlight; 1736 panel->backlight.setup = bxt_setup_backlight;
1729 panel->backlight.enable = bxt_enable_backlight; 1737 panel->backlight.enable = bxt_enable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a7ef45da0a9e..08274591db7e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
29#include <drm/drm_plane_helper.h>
29#include "i915_drv.h" 30#include "i915_drv.h"
30#include "intel_drv.h" 31#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h" 32#include "../../../platform/x86/intel_ips.h"
@@ -58,6 +59,10 @@ static void bxt_init_clock_gating(struct drm_device *dev)
58{ 59{
59 struct drm_i915_private *dev_priv = dev->dev_private; 60 struct drm_i915_private *dev_priv = dev->dev_private;
60 61
62 /* See Bspec note for PSR2_CTL bit 31, Wa#828:bxt */
63 I915_WRITE(CHICKEN_PAR1_1,
64 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
65
61 /* WaDisableSDEUnitClockGating:bxt */ 66 /* WaDisableSDEUnitClockGating:bxt */
62 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 67 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
63 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 68 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -2012,10 +2017,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2012} 2017}
2013 2018
2014static uint32_t 2019static uint32_t
2015hsw_compute_linetime_wm(struct drm_device *dev, 2020hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2016 struct intel_crtc_state *cstate)
2017{ 2021{
2018 struct drm_i915_private *dev_priv = dev->dev_private; 2022 const struct intel_atomic_state *intel_state =
2023 to_intel_atomic_state(cstate->base.state);
2019 const struct drm_display_mode *adjusted_mode = 2024 const struct drm_display_mode *adjusted_mode =
2020 &cstate->base.adjusted_mode; 2025 &cstate->base.adjusted_mode;
2021 u32 linetime, ips_linetime; 2026 u32 linetime, ips_linetime;
@@ -2024,7 +2029,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
2024 return 0; 2029 return 0;
2025 if (WARN_ON(adjusted_mode->crtc_clock == 0)) 2030 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2026 return 0; 2031 return 0;
2027 if (WARN_ON(dev_priv->cdclk_freq == 0)) 2032 if (WARN_ON(intel_state->cdclk == 0))
2028 return 0; 2033 return 0;
2029 2034
2030 /* The WM are computed based on how long it takes to fill a single 2035
@@ -2033,7 +2038,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
2033 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2038 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2034 adjusted_mode->crtc_clock); 2039 adjusted_mode->crtc_clock);
2035 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2040 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2036 dev_priv->cdclk_freq); 2041 intel_state->cdclk);
2037 2042
2038 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2043 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2039 PIPE_WM_LINETIME_TIME(linetime); 2044 PIPE_WM_LINETIME_TIME(linetime);
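To make the fixed-point units here concrete, the following is a minimal userspace sketch of the same arithmetic; the timing values are hypothetical (not taken from this patch), and DIV_ROUND_CLOSEST is re-defined locally to mirror the kernel helper. The factor of 8 means the result is in eighths of a microsecond per scanline.

    #include <stdio.h>
    #include <stdint.h>

    /* Local stand-in for the kernel's DIV_ROUND_CLOSEST() helper. */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        /* Hypothetical 1080p timing: htotal in pixels, clocks in kHz. */
        uint32_t crtc_htotal = 2200;
        uint32_t crtc_clock = 148500;  /* pipe pixel clock */
        uint32_t cdclk = 450000;       /* core display clock, per intel_state->cdclk */

        /* Time to scan one line, in 1/8 us units. */
        uint32_t linetime = DIV_ROUND_CLOSEST(crtc_htotal * 1000 * 8, crtc_clock);
        uint32_t ips_linetime = DIV_ROUND_CLOSEST(crtc_htotal * 1000 * 8, cdclk);

        /* Prints: linetime=119 (14.88 us), ips_linetime=39 (4.88 us) */
        printf("linetime=%u (%.2f us), ips_linetime=%u (%.2f us)\n",
               linetime, linetime / 8.0, ips_linetime, ips_linetime / 8.0);
        return 0;
    }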
@@ -2146,14 +2151,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2146static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2151static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2147{ 2152{
2148 /* ILK sprite LP0 latency is 1300 ns */ 2153 /* ILK sprite LP0 latency is 1300 ns */
2149 if (INTEL_INFO(dev)->gen == 5) 2154 if (IS_GEN5(dev))
2150 wm[0] = 13; 2155 wm[0] = 13;
2151} 2156}
2152 2157
2153static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2158static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2154{ 2159{
2155 /* ILK cursor LP0 latency is 1300 ns */ 2160 /* ILK cursor LP0 latency is 1300 ns */
2156 if (INTEL_INFO(dev)->gen == 5) 2161 if (IS_GEN5(dev))
2157 wm[0] = 13; 2162 wm[0] = 13;
2158 2163
2159 /* WaDoubleCursorLP3Latency:ivb */ 2164 /* WaDoubleCursorLP3Latency:ivb */
@@ -2309,7 +2314,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2309 int level, max_level = ilk_wm_max_level(dev), usable_level; 2314 int level, max_level = ilk_wm_max_level(dev), usable_level;
2310 struct ilk_wm_maximums max; 2315 struct ilk_wm_maximums max;
2311 2316
2312 pipe_wm = &cstate->wm.optimal.ilk; 2317 pipe_wm = &cstate->wm.ilk.optimal;
2313 2318
2314 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2319 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2315 struct intel_plane_state *ps; 2320 struct intel_plane_state *ps;
@@ -2352,7 +2357,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2352 pipe_wm->wm[0] = pipe_wm->raw_wm[0]; 2357 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2353 2358
2354 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2359 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2355 pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); 2360 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
2356 2361
2357 if (!ilk_validate_pipe_wm(dev, pipe_wm)) 2362 if (!ilk_validate_pipe_wm(dev, pipe_wm))
2358 return -EINVAL; 2363 return -EINVAL;
@@ -2391,7 +2396,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2391 struct intel_crtc *intel_crtc, 2396 struct intel_crtc *intel_crtc,
2392 struct intel_crtc_state *newstate) 2397 struct intel_crtc_state *newstate)
2393{ 2398{
2394 struct intel_pipe_wm *a = &newstate->wm.intermediate; 2399 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2395 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; 2400 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2396 int level, max_level = ilk_wm_max_level(dev); 2401 int level, max_level = ilk_wm_max_level(dev);
2397 2402
@@ -2400,7 +2405,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2400 * currently active watermarks to get values that are safe both before 2405 * currently active watermarks to get values that are safe both before
2401 * and after the vblank. 2406 * and after the vblank.
2402 */ 2407 */
2403 *a = newstate->wm.optimal.ilk; 2408 *a = newstate->wm.ilk.optimal;
2404 a->pipe_enabled |= b->pipe_enabled; 2409 a->pipe_enabled |= b->pipe_enabled;
2405 a->sprites_enabled |= b->sprites_enabled; 2410 a->sprites_enabled |= b->sprites_enabled;
2406 a->sprites_scaled |= b->sprites_scaled; 2411 a->sprites_scaled |= b->sprites_scaled;
@@ -2429,7 +2434,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2429 * If our intermediate WM are identical to the final WM, then we can 2434 * If our intermediate WM are identical to the final WM, then we can
2430 * omit the post-vblank programming; only update if it's different. 2435 * omit the post-vblank programming; only update if it's different.
2431 */ 2436 */
2432 if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0) 2437 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
2433 newstate->wm.need_postvbl_update = false; 2438 newstate->wm.need_postvbl_update = false;
2434 2439
2435 return 0; 2440 return 0;
@@ -2849,20 +2854,29 @@ skl_wm_plane_id(const struct intel_plane *plane)
2849static void 2854static void
2850skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 2855skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2851 const struct intel_crtc_state *cstate, 2856 const struct intel_crtc_state *cstate,
2852 const struct intel_wm_config *config, 2857 struct skl_ddb_entry *alloc, /* out */
2853 struct skl_ddb_entry *alloc /* out */) 2858 int *num_active /* out */)
2854{ 2859{
2860 struct drm_atomic_state *state = cstate->base.state;
2861 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2862 struct drm_i915_private *dev_priv = to_i915(dev);
2855 struct drm_crtc *for_crtc = cstate->base.crtc; 2863 struct drm_crtc *for_crtc = cstate->base.crtc;
2856 struct drm_crtc *crtc;
2857 unsigned int pipe_size, ddb_size; 2864 unsigned int pipe_size, ddb_size;
2858 int nth_active_pipe; 2865 int nth_active_pipe;
2866 int pipe = to_intel_crtc(for_crtc)->pipe;
2859 2867
2860 if (!cstate->base.active) { 2868 if (WARN_ON(!state) || !cstate->base.active) {
2861 alloc->start = 0; 2869 alloc->start = 0;
2862 alloc->end = 0; 2870 alloc->end = 0;
2871 *num_active = hweight32(dev_priv->active_crtcs);
2863 return; 2872 return;
2864 } 2873 }
2865 2874
2875 if (intel_state->active_pipe_changes)
2876 *num_active = hweight32(intel_state->active_crtcs);
2877 else
2878 *num_active = hweight32(dev_priv->active_crtcs);
2879
2866 if (IS_BROXTON(dev)) 2880 if (IS_BROXTON(dev))
2867 ddb_size = BXT_DDB_SIZE; 2881 ddb_size = BXT_DDB_SIZE;
2868 else 2882 else
@@ -2870,25 +2884,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2870 2884
2871 ddb_size -= 4; /* 4 blocks for bypass path allocation */ 2885 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2872 2886
2873 nth_active_pipe = 0; 2887 /*
2874 for_each_crtc(dev, crtc) { 2888 * If the state doesn't change the active CRTCs, then there's
2875 if (!to_intel_crtc(crtc)->active) 2889 * no need to recalculate; the existing pipe allocation limits
2876 continue; 2890 * should remain unchanged. Note that we're safe from racing
2877 2891 * commits since any racing commit that changes the active CRTC
2878 if (crtc == for_crtc) 2892 * list would need to grab _all_ crtc locks, including the one
2879 break; 2893 * we currently hold.
2880 2894 */
2881 nth_active_pipe++; 2895 if (!intel_state->active_pipe_changes) {
2896 *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
2897 return;
2882 } 2898 }
2883 2899
2884 pipe_size = ddb_size / config->num_pipes_active; 2900 nth_active_pipe = hweight32(intel_state->active_crtcs &
2885 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; 2901 (drm_crtc_mask(for_crtc) - 1));
2902 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
2903 alloc->start = nth_active_pipe * ddb_size / *num_active;
2886 alloc->end = alloc->start + pipe_size; 2904 alloc->end = alloc->start + pipe_size;
2887} 2905}
2888 2906
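A worked example of the new mask-based split may help. This sketch assumes the 896-block SKL DDB size (less the 4 bypass blocks subtracted above) and uses a compiler builtin as a userspace stand-in for hweight32(); all values are illustrative, not from this patch.

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace stand-in for the kernel's hweight32(). */
    static int hweight32(uint32_t x) { return __builtin_popcount(x); }

    int main(void)
    {
        /* Hypothetical config: pipes A and C active, allocating for pipe C,
         * with an assumed 896-block DDB less the 4 bypass blocks. */
        uint32_t ddb_size = 896 - 4;
        uint32_t active_crtcs = (1u << 0) | (1u << 2);
        uint32_t for_crtc_mask = 1u << 2;

        int num_active = hweight32(active_crtcs);
        /* Count only the active pipes ordered before this one. */
        int nth_active_pipe = hweight32(active_crtcs & (for_crtc_mask - 1));

        uint32_t pipe_size = ddb_size / num_active;
        uint32_t start = nth_active_pipe * ddb_size / num_active;

        /* Prints: pipe C gets DDB blocks [446, 892) */
        printf("pipe C gets DDB blocks [%u, %u)\n", start, start + pipe_size);
        return 0;
    }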
2889static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) 2907static unsigned int skl_cursor_allocation(int num_active)
2890{ 2908{
2891 if (config->num_pipes_active == 1) 2909 if (num_active == 1)
2892 return 32; 2910 return 32;
2893 2911
2894 return 8; 2912 return 8;
@@ -2932,6 +2950,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2932 } 2950 }
2933} 2951}
2934 2952
2953/*
2954 * Determines the downscale amount of a plane for the purposes of watermark calculations.
2955 * The bspec defines downscale amount as:
2956 *
2957 * """
2958 * Horizontal down scale amount = maximum[1, Horizontal source size /
2959 * Horizontal destination size]
2960 * Vertical down scale amount = maximum[1, Vertical source size /
2961 * Vertical destination size]
2962 * Total down scale amount = Horizontal down scale amount *
2963 * Vertical down scale amount
2964 * """
2965 *
2966 * Return value is provided in 16.16 fixed point form to retain fractional part.
2967 * Caller should take care of dividing & rounding off the value.
2968 */
2969static uint32_t
2970skl_plane_downscale_amount(const struct intel_plane_state *pstate)
2971{
2972 uint32_t downscale_h, downscale_w;
2973 uint32_t src_w, src_h, dst_w, dst_h;
2974
2975 if (WARN_ON(!pstate->visible))
2976 return DRM_PLANE_HELPER_NO_SCALING;
2977
2978 /* n.b., src is 16.16 fixed point, dst is whole integer */
2979 src_w = drm_rect_width(&pstate->src);
2980 src_h = drm_rect_height(&pstate->src);
2981 dst_w = drm_rect_width(&pstate->dst);
2982 dst_h = drm_rect_height(&pstate->dst);
2983 if (intel_rotation_90_or_270(pstate->base.rotation))
2984 swap(dst_w, dst_h);
2985
2986 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
2987 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
2988
2989 /* Provide result in 16.16 fixed point */
2990 return (uint64_t)downscale_w * downscale_h >> 16;
2991}
2992
2935static unsigned int 2993static unsigned int
2936skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, 2994skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2937 const struct drm_plane_state *pstate, 2995 const struct drm_plane_state *pstate,
@@ -2939,7 +2997,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2939{ 2997{
2940 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 2998 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
2941 struct drm_framebuffer *fb = pstate->fb; 2999 struct drm_framebuffer *fb = pstate->fb;
3000 uint32_t down_scale_amount, data_rate;
2942 uint32_t width = 0, height = 0; 3001 uint32_t width = 0, height = 0;
3002 unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
3003
3004 if (!intel_pstate->visible)
3005 return 0;
3006 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
3007 return 0;
3008 if (y && format != DRM_FORMAT_NV12)
3009 return 0;
2943 3010
2944 width = drm_rect_width(&intel_pstate->src) >> 16; 3011 width = drm_rect_width(&intel_pstate->src) >> 16;
2945 height = drm_rect_height(&intel_pstate->src) >> 16; 3012 height = drm_rect_height(&intel_pstate->src) >> 16;
@@ -2948,17 +3015,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2948 swap(width, height); 3015 swap(width, height);
2949 3016
2950 /* for planar format */ 3017 /* for planar format */
2951 if (fb->pixel_format == DRM_FORMAT_NV12) { 3018 if (format == DRM_FORMAT_NV12) {
2952 if (y) /* y-plane data rate */ 3019 if (y) /* y-plane data rate */
2953 return width * height * 3020 data_rate = width * height *
2954 drm_format_plane_cpp(fb->pixel_format, 0); 3021 drm_format_plane_cpp(format, 0);
2955 else /* uv-plane data rate */ 3022 else /* uv-plane data rate */
2956 return (width / 2) * (height / 2) * 3023 data_rate = (width / 2) * (height / 2) *
2957 drm_format_plane_cpp(fb->pixel_format, 1); 3024 drm_format_plane_cpp(format, 1);
3025 } else {
3026 /* for packed formats */
3027 data_rate = width * height * drm_format_plane_cpp(format, 0);
2958 } 3028 }
2959 3029
2960 /* for packed formats */ 3030 down_scale_amount = skl_plane_downscale_amount(intel_pstate);
2961 return width * height * drm_format_plane_cpp(fb->pixel_format, 0); 3031
3032 return (uint64_t)data_rate * down_scale_amount >> 16;
2962} 3033}
2963 3034
2964/* 3035/*
@@ -2967,86 +3038,188 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2967 * 3 * 4096 * 8192 * 4 < 2^32 3038 * 3 * 4096 * 8192 * 4 < 2^32
2968 */ 3039 */
2969static unsigned int 3040static unsigned int
2970skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) 3041skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
2971{ 3042{
2972 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3043 struct drm_crtc_state *cstate = &intel_cstate->base;
2973 struct drm_device *dev = intel_crtc->base.dev; 3044 struct drm_atomic_state *state = cstate->state;
3045 struct drm_crtc *crtc = cstate->crtc;
3046 struct drm_device *dev = crtc->dev;
3047 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3048 const struct drm_plane *plane;
2974 const struct intel_plane *intel_plane; 3049 const struct intel_plane *intel_plane;
2975 unsigned int total_data_rate = 0; 3050 struct drm_plane_state *pstate;
3051 unsigned int rate, total_data_rate = 0;
3052 int id;
3053 int i;
2976 3054
2977 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3055 if (WARN_ON(!state))
2978 const struct drm_plane_state *pstate = intel_plane->base.state; 3056 return 0;
2979 3057
2980 if (pstate->fb == NULL) 3058 /* Calculate and cache data rate for each plane */
2981 continue; 3059 for_each_plane_in_state(state, plane, pstate, i) {
3060 id = skl_wm_plane_id(to_intel_plane(plane));
3061 intel_plane = to_intel_plane(plane);
2982 3062
2983 if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) 3063 if (intel_plane->pipe != intel_crtc->pipe)
2984 continue; 3064 continue;
2985 3065
2986 /* packed/uv */ 3066 /* packed/uv */
2987 total_data_rate += skl_plane_relative_data_rate(cstate, 3067 rate = skl_plane_relative_data_rate(intel_cstate,
2988 pstate, 3068 pstate, 0);
2989 0); 3069 intel_cstate->wm.skl.plane_data_rate[id] = rate;
3070
3071 /* y-plane */
3072 rate = skl_plane_relative_data_rate(intel_cstate,
3073 pstate, 1);
3074 intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
3075 }
3076
3077 /* Calculate CRTC's total data rate from cached values */
3078 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3079 int id = skl_wm_plane_id(intel_plane);
2990 3080
2991 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) 3081 /* packed/uv */
2992 /* y-plane */ 3082 total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
2993 total_data_rate += skl_plane_relative_data_rate(cstate, 3083 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
2994 pstate,
2995 1);
2996 } 3084 }
2997 3085
3086 WARN_ON(cstate->plane_mask && total_data_rate == 0);
3087
2998 return total_data_rate; 3088 return total_data_rate;
2999} 3089}
3000 3090
3001static void 3091static uint16_t
3092skl_ddb_min_alloc(const struct drm_plane_state *pstate,
3093 const int y)
3094{
3095 struct drm_framebuffer *fb = pstate->fb;
3096 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3097 uint32_t src_w, src_h;
3098 uint32_t min_scanlines = 8;
3099 uint8_t plane_bpp;
3100
3101 if (WARN_ON(!fb))
3102 return 0;
3103
3104 /* For packed formats, no y-plane, return 0 */
3105 if (y && fb->pixel_format != DRM_FORMAT_NV12)
3106 return 0;
3107
3108 /* For non-Y-tiled buffers, return 8 blocks */
3109 if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
3110 fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
3111 return 8;
3112
3113 src_w = drm_rect_width(&intel_pstate->src) >> 16;
3114 src_h = drm_rect_height(&intel_pstate->src) >> 16;
3115
3116 if (intel_rotation_90_or_270(pstate->rotation))
3117 swap(src_w, src_h);
3118
3119 /* Halve UV plane width and height for NV12 */
3120 if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
3121 src_w /= 2;
3122 src_h /= 2;
3123 }
3124
3125 if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
3126 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
3127 else
3128 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
3129
3130 if (intel_rotation_90_or_270(pstate->rotation)) {
3131 switch (plane_bpp) {
3132 case 1:
3133 min_scanlines = 32;
3134 break;
3135 case 2:
3136 min_scanlines = 16;
3137 break;
3138 case 4:
3139 min_scanlines = 8;
3140 break;
3141 case 8:
3142 min_scanlines = 4;
3143 break;
3144 default:
3145 WARN(1, "Unsupported pixel depth %u for rotation",
3146 plane_bpp);
3147 min_scanlines = 32;
3148 }
3149 }
3150
3151 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
3152}
3153
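To check the closing formula against the bpp/min_scanlines switch above, here is a minimal sketch with a hypothetical rotated Y-tiled plane; the values are illustrative only.

    #include <stdio.h>
    #include <stdint.h>

    /* Local stand-in for the kernel's DIV_ROUND_UP() helper. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Hypothetical Y-tiled XRGB8888 plane, 1920 wide, rotated 90 degrees:
         * 4 bytes per pixel selects min_scanlines = 8 in the switch above. */
        uint32_t src_w = 1920, plane_bpp = 4;
        uint32_t min_scanlines = 8;

        uint32_t min_alloc =
            DIV_ROUND_UP(4 * src_w * plane_bpp, 512) * min_scanlines / 4 + 3;

        /* Prints: minimum allocation = 123 blocks */
        printf("minimum allocation = %u blocks\n", min_alloc);
        return 0;
    }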
3154static int
3002skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, 3155skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3003 struct skl_ddb_allocation *ddb /* out */) 3156 struct skl_ddb_allocation *ddb /* out */)
3004{ 3157{
3158 struct drm_atomic_state *state = cstate->base.state;
3005 struct drm_crtc *crtc = cstate->base.crtc; 3159 struct drm_crtc *crtc = cstate->base.crtc;
3006 struct drm_device *dev = crtc->dev; 3160 struct drm_device *dev = crtc->dev;
3007 struct drm_i915_private *dev_priv = to_i915(dev);
3008 struct intel_wm_config *config = &dev_priv->wm.config;
3009 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3161 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3010 struct intel_plane *intel_plane; 3162 struct intel_plane *intel_plane;
3163 struct drm_plane *plane;
3164 struct drm_plane_state *pstate;
3011 enum pipe pipe = intel_crtc->pipe; 3165 enum pipe pipe = intel_crtc->pipe;
3012 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 3166 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3013 uint16_t alloc_size, start, cursor_blocks; 3167 uint16_t alloc_size, start, cursor_blocks;
3014 uint16_t minimum[I915_MAX_PLANES]; 3168 uint16_t *minimum = cstate->wm.skl.minimum_blocks;
3015 uint16_t y_minimum[I915_MAX_PLANES]; 3169 uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
3016 unsigned int total_data_rate; 3170 unsigned int total_data_rate;
3171 int num_active;
3172 int id, i;
3173
3174 if (WARN_ON(!state))
3175 return 0;
3017 3176
3018 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); 3177 if (!cstate->base.active) {
3178 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
3179 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3180 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3181 return 0;
3182 }
3183
3184 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3019 alloc_size = skl_ddb_entry_size(alloc); 3185 alloc_size = skl_ddb_entry_size(alloc);
3020 if (alloc_size == 0) { 3186 if (alloc_size == 0) {
3021 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 3187 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3022 memset(&ddb->plane[pipe][PLANE_CURSOR], 0, 3188 return 0;
3023 sizeof(ddb->plane[pipe][PLANE_CURSOR]));
3024 return;
3025 } 3189 }
3026 3190
3027 cursor_blocks = skl_cursor_allocation(config); 3191 cursor_blocks = skl_cursor_allocation(num_active);
3028 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks; 3192 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
3029 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; 3193 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3030 3194
3031 alloc_size -= cursor_blocks; 3195 alloc_size -= cursor_blocks;
3032 alloc->end -= cursor_blocks;
3033 3196
3034 /* 1. Allocate the minimum required blocks for each active plane */ 3197 /* 1. Allocate the minimum required blocks for each active plane */
3035 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3198 for_each_plane_in_state(state, plane, pstate, i) {
3036 struct drm_plane *plane = &intel_plane->base; 3199 intel_plane = to_intel_plane(plane);
3037 struct drm_framebuffer *fb = plane->state->fb; 3200 id = skl_wm_plane_id(intel_plane);
3038 int id = skl_wm_plane_id(intel_plane);
3039 3201
3040 if (!to_intel_plane_state(plane->state)->visible) 3202 if (intel_plane->pipe != pipe)
3041 continue; 3203 continue;
3042 3204
3043 if (plane->type == DRM_PLANE_TYPE_CURSOR) 3205 if (!to_intel_plane_state(pstate)->visible) {
3206 minimum[id] = 0;
3207 y_minimum[id] = 0;
3208 continue;
3209 }
3210 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3211 minimum[id] = 0;
3212 y_minimum[id] = 0;
3044 continue; 3213 continue;
3214 }
3045 3215
3046 minimum[id] = 8; 3216 minimum[id] = skl_ddb_min_alloc(pstate, 0);
3047 alloc_size -= minimum[id]; 3217 y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
3048 y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; 3218 }
3049 alloc_size -= y_minimum[id]; 3219
3220 for (i = 0; i < PLANE_CURSOR; i++) {
3221 alloc_size -= minimum[i];
3222 alloc_size -= y_minimum[i];
3050 } 3223 }
3051 3224
3052 /* 3225 /*
@@ -3056,21 +3229,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3056 * FIXME: we may not allocate every single block here. 3229 * FIXME: we may not allocate every single block here.
3057 */ 3230 */
3058 total_data_rate = skl_get_total_relative_data_rate(cstate); 3231 total_data_rate = skl_get_total_relative_data_rate(cstate);
3232 if (total_data_rate == 0)
3233 return 0;
3059 3234
3060 start = alloc->start; 3235 start = alloc->start;
3061 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3236 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3062 struct drm_plane *plane = &intel_plane->base;
3063 struct drm_plane_state *pstate = intel_plane->base.state;
3064 unsigned int data_rate, y_data_rate; 3237 unsigned int data_rate, y_data_rate;
3065 uint16_t plane_blocks, y_plane_blocks = 0; 3238 uint16_t plane_blocks, y_plane_blocks = 0;
3066 int id = skl_wm_plane_id(intel_plane); 3239 int id = skl_wm_plane_id(intel_plane);
3067 3240
3068 if (!to_intel_plane_state(pstate)->visible) 3241 data_rate = cstate->wm.skl.plane_data_rate[id];
3069 continue;
3070 if (plane->type == DRM_PLANE_TYPE_CURSOR)
3071 continue;
3072
3073 data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
3074 3242
3075 /* 3243 /*
3076 * allocation for (packed formats) or (uv-plane part of planar format): 3244 * allocation for (packed formats) or (uv-plane part of planar format):
@@ -3081,30 +3249,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3081 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 3249 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3082 total_data_rate); 3250 total_data_rate);
3083 3251
3084 ddb->plane[pipe][id].start = start; 3252 /* Leave disabled planes at (0,0) */
3085 ddb->plane[pipe][id].end = start + plane_blocks; 3253 if (data_rate) {
3254 ddb->plane[pipe][id].start = start;
3255 ddb->plane[pipe][id].end = start + plane_blocks;
3256 }
3086 3257
3087 start += plane_blocks; 3258 start += plane_blocks;
3088 3259
3089 /* 3260 /*
3090 * allocation for y_plane part of planar format: 3261 * allocation for y_plane part of planar format:
3091 */ 3262 */
3092 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { 3263 y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
3093 y_data_rate = skl_plane_relative_data_rate(cstate, 3264
3094 pstate, 3265 y_plane_blocks = y_minimum[id];
3095 1); 3266 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3096 y_plane_blocks = y_minimum[id]; 3267 total_data_rate);
3097 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3098 total_data_rate);
3099 3268
3269 if (y_data_rate) {
3100 ddb->y_plane[pipe][id].start = start; 3270 ddb->y_plane[pipe][id].start = start;
3101 ddb->y_plane[pipe][id].end = start + y_plane_blocks; 3271 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3102
3103 start += y_plane_blocks;
3104 } 3272 }
3105 3273
3274 start += y_plane_blocks;
3106 } 3275 }
3107 3276
3277 return 0;
3108} 3278}
3109 3279
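The proportional split in step 2 is easier to see with numbers. This sketch uses two planes with invented data rates and minimums (the rates roughly correspond to a 4K primary and a 1080p sprite); none of these values come from the patch itself.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical pipe: 438 blocks remain after the cursor and the
         * per-plane minimums have been carved out. */
        uint32_t alloc_size = 438;
        uint32_t rate[2]    = { 33177600, 8294400 }; /* invented data rates */
        uint32_t minimum[2] = { 123, 35 };           /* invented minimums */
        uint64_t total_data_rate = rate[0] + rate[1];

        for (int id = 0; id < 2; id++) {
            uint32_t plane_blocks = minimum[id] +
                (uint32_t)((uint64_t)alloc_size * rate[id] / total_data_rate);
            /* Prints: plane 0: 473 blocks, then plane 1: 122 blocks */
            printf("plane %d: %u blocks\n", id, plane_blocks);
        }
        return 0;
    }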
3110static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) 3280static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
@@ -3161,35 +3331,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3161 return ret; 3331 return ret;
3162} 3332}
3163 3333
3164static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, 3334static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
3165 const struct intel_crtc *intel_crtc) 3335 struct intel_plane_state *pstate)
3166{ 3336{
3167 struct drm_device *dev = intel_crtc->base.dev; 3337 uint64_t adjusted_pixel_rate;
3168 struct drm_i915_private *dev_priv = dev->dev_private; 3338 uint64_t downscale_amount;
3169 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; 3339 uint64_t pixel_rate;
3340
3341 /* Shouldn't reach here on disabled planes... */
3342 if (WARN_ON(!pstate->visible))
3343 return 0;
3170 3344
3171 /* 3345 /*
3172 * If ddb allocation of pipes changed, it may require recalculation of 3346 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3173 * watermarks 3347 * with additional adjustments for plane-specific scaling.
3174 */ 3348 */
3175 if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe))) 3349 adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
3176 return true; 3350 downscale_amount = skl_plane_downscale_amount(pstate);
3351
3352 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3353 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
3177 3354
3178 return false; 3355 return pixel_rate;
3179} 3356}
3180 3357
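Numerically, with a hypothetical 148500 kHz pipe pixel rate and the 2.0x total downscale from the earlier sketch (0x20000 in 16.16 form), the adjusted plane rate comes out to (148500 * 0x20000) >> 16 = 297000 kHz, which still fits in 32 bits, as the clamp WARN_ON in the new helper verifies.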
3181static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 3358static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3182 struct intel_crtc_state *cstate, 3359 struct intel_crtc_state *cstate,
3183 struct intel_plane *intel_plane, 3360 struct intel_plane_state *intel_pstate,
3184 uint16_t ddb_allocation, 3361 uint16_t ddb_allocation,
3185 int level, 3362 int level,
3186 uint16_t *out_blocks, /* out */ 3363 uint16_t *out_blocks, /* out */
3187 uint8_t *out_lines /* out */) 3364 uint8_t *out_lines, /* out */
3365 bool *enabled /* out */)
3188{ 3366{
3189 struct drm_plane *plane = &intel_plane->base; 3367 struct drm_plane_state *pstate = &intel_pstate->base;
3190 struct drm_framebuffer *fb = plane->state->fb; 3368 struct drm_framebuffer *fb = pstate->fb;
3191 struct intel_plane_state *intel_pstate =
3192 to_intel_plane_state(plane->state);
3193 uint32_t latency = dev_priv->wm.skl_latency[level]; 3369 uint32_t latency = dev_priv->wm.skl_latency[level];
3194 uint32_t method1, method2; 3370 uint32_t method1, method2;
3195 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3371 uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3197,20 +3373,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3197 uint32_t selected_result; 3373 uint32_t selected_result;
3198 uint8_t cpp; 3374 uint8_t cpp;
3199 uint32_t width = 0, height = 0; 3375 uint32_t width = 0, height = 0;
3376 uint32_t plane_pixel_rate;
3200 3377
3201 if (latency == 0 || !cstate->base.active || !intel_pstate->visible) 3378 if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
3202 return false; 3379 *enabled = false;
3380 return 0;
3381 }
3203 3382
3204 width = drm_rect_width(&intel_pstate->src) >> 16; 3383 width = drm_rect_width(&intel_pstate->src) >> 16;
3205 height = drm_rect_height(&intel_pstate->src) >> 16; 3384 height = drm_rect_height(&intel_pstate->src) >> 16;
3206 3385
3207 if (intel_rotation_90_or_270(plane->state->rotation)) 3386 if (intel_rotation_90_or_270(pstate->rotation))
3208 swap(width, height); 3387 swap(width, height);
3209 3388
3210 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3389 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3211 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), 3390 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3212 cpp, latency); 3391
3213 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), 3392 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3393 method2 = skl_wm_method2(plane_pixel_rate,
3214 cstate->base.adjusted_mode.crtc_htotal, 3394 cstate->base.adjusted_mode.crtc_htotal,
3215 width, 3395 width,
3216 cpp, 3396 cpp,
@@ -3224,7 +3404,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3224 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { 3404 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3225 uint32_t min_scanlines = 4; 3405 uint32_t min_scanlines = 4;
3226 uint32_t y_tile_minimum; 3406 uint32_t y_tile_minimum;
3227 if (intel_rotation_90_or_270(plane->state->rotation)) { 3407 if (intel_rotation_90_or_270(pstate->rotation)) {
3228 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 3408 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3229 drm_format_plane_cpp(fb->pixel_format, 1) : 3409 drm_format_plane_cpp(fb->pixel_format, 1) :
3230 drm_format_plane_cpp(fb->pixel_format, 0); 3410 drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3260,40 +3440,99 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
                 res_blocks++;
         }
 
-        if (res_blocks >= ddb_allocation || res_lines > 31)
-                return false;
+        if (res_blocks >= ddb_allocation || res_lines > 31) {
+                *enabled = false;
+
+                /*
+                 * If there are no valid level 0 watermarks, then we can't
+                 * support this display configuration.
+                 */
+                if (level) {
+                        return 0;
+                } else {
+                        DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
+                        DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
+                                      to_intel_crtc(cstate->base.crtc)->pipe,
+                                      skl_wm_plane_id(to_intel_plane(pstate->plane)),
+                                      res_blocks, ddb_allocation, res_lines);
+
+                        return -EINVAL;
+                }
+        }
 
         *out_blocks = res_blocks;
         *out_lines = res_lines;
+        *enabled = true;
 
-        return true;
+        return 0;
 }
 
-static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
-                                 struct skl_ddb_allocation *ddb,
-                                 struct intel_crtc_state *cstate,
-                                 int level,
-                                 struct skl_wm_level *result)
+static int
+skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+                     struct skl_ddb_allocation *ddb,
+                     struct intel_crtc_state *cstate,
+                     int level,
+                     struct skl_wm_level *result)
 {
         struct drm_device *dev = dev_priv->dev;
+        struct drm_atomic_state *state = cstate->base.state;
         struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+        struct drm_plane *plane;
         struct intel_plane *intel_plane;
+        struct intel_plane_state *intel_pstate;
         uint16_t ddb_blocks;
         enum pipe pipe = intel_crtc->pipe;
+        int ret;
 
-        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+        /*
+         * We'll only calculate watermarks for planes that are actually
+         * enabled, so make sure all other planes are set as disabled.
+         */
+        memset(result, 0, sizeof(*result));
+
+        for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
                 int i = skl_wm_plane_id(intel_plane);
 
+                plane = &intel_plane->base;
+                intel_pstate = NULL;
+                if (state)
+                        intel_pstate =
+                                intel_atomic_get_existing_plane_state(state,
+                                                                      intel_plane);
+
+                /*
+                 * Note: If we start supporting multiple pending atomic commits
+                 * against the same planes/CRTC's in the future, plane->state
+                 * will no longer be the correct pre-state to use for the
+                 * calculations here and we'll need to change where we get the
+                 * 'unchanged' plane data from.
+                 *
+                 * For now this is fine because we only allow one queued commit
+                 * against a CRTC.  Even if the plane isn't modified by this
+                 * transaction and we don't have a plane lock, we still have
+                 * the CRTC's lock, so we know that no other transactions are
+                 * racing with us to update it.
+                 */
+                if (!intel_pstate)
+                        intel_pstate = to_intel_plane_state(plane->state);
+
+                WARN_ON(!intel_pstate->base.fb);
+
                 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
 
-                result->plane_en[i] = skl_compute_plane_wm(dev_priv,
-                                                cstate,
-                                                intel_plane,
-                                                ddb_blocks,
-                                                level,
-                                                &result->plane_res_b[i],
-                                                &result->plane_res_l[i]);
+                ret = skl_compute_plane_wm(dev_priv,
+                                           cstate,
+                                           intel_pstate,
+                                           ddb_blocks,
+                                           level,
+                                           &result->plane_res_b[i],
+                                           &result->plane_res_l[i],
+                                           &result->plane_en[i]);
+                if (ret)
+                        return ret;
         }
+
+        return 0;
 }
 
 static uint32_t
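With this hunk, skl_compute_plane_wm() stops returning the enable bit directly and instead reports it through *enabled, while the return value carries an errno. A hypothetical caller (local variable names invented for illustration) would interpret the three outcomes like this:

        /* 0 with *enabled == true:  valid WM in blocks/lines
         * 0 with *enabled == false: plane WM disabled at this level, continue
         * -EINVAL:                  level 0 unsatisfiable, fail the check */
        ret = skl_compute_plane_wm(dev_priv, cstate, intel_pstate, ddb_blocks,
                                   level, &blocks, &lines, &enabled);
        if (ret)
                return ret;     /* propagates up to the atomic check */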
@@ -3327,21 +3566,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
         }
 }
 
-static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
+static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
                                 struct skl_ddb_allocation *ddb,
                                 struct skl_pipe_wm *pipe_wm)
 {
         struct drm_device *dev = cstate->base.crtc->dev;
         const struct drm_i915_private *dev_priv = dev->dev_private;
         int level, max_level = ilk_wm_max_level(dev);
+        int ret;
 
         for (level = 0; level <= max_level; level++) {
-                skl_compute_wm_level(dev_priv, ddb, cstate,
-                                     level, &pipe_wm->wm[level]);
+                ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+                                           level, &pipe_wm->wm[level]);
+                if (ret)
+                        return ret;
         }
         pipe_wm->linetime = skl_compute_linetime_wm(cstate);
 
         skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
+
+        return 0;
 }
 
 static void skl_compute_wm_results(struct drm_device *dev,
@@ -3421,7 +3665,9 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
         int i, level, max_level = ilk_wm_max_level(dev);
         enum pipe pipe = crtc->pipe;
 
-        if (!new->dirty[pipe])
+        if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
+                continue;
+        if (!crtc->active)
                 continue;
 
         I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
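Dirty tracking switches here from a per-pipe bool array to a bitmask keyed by CRTC index. drm_crtc_mask() is the stock DRM helper for building such masks:

        /* From include/drm/drm_crtc.h: one bit per registered CRTC. */
        static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
        {
                return 1 << drm_crtc_index(crtc);
        }

        /* Typical test, as used above: */
        if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
                continue;       /* nothing to write for this CRTC */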
@@ -3588,87 +3834,144 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
         }
 }
 
-static bool skl_update_pipe_wm(struct drm_crtc *crtc,
+static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
                                struct skl_ddb_allocation *ddb, /* out */
-                               struct skl_pipe_wm *pipe_wm /* out */)
+                               struct skl_pipe_wm *pipe_wm, /* out */
+                               bool *changed /* out */)
 {
-        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+        struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
+        struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
+        int ret;
 
-        skl_allocate_pipe_ddb(cstate, ddb);
-        skl_compute_pipe_wm(cstate, ddb, pipe_wm);
+        ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+        if (ret)
+                return ret;
 
         if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
-                return false;
-
-        intel_crtc->wm.active.skl = *pipe_wm;
+                *changed = false;
+        else
+                *changed = true;
 
-        return true;
+        return 0;
 }
 
-static void skl_update_other_pipe_wm(struct drm_device *dev,
-                                     struct drm_crtc *crtc,
-                                     struct skl_wm_values *r)
+static int
+skl_compute_ddb(struct drm_atomic_state *state)
 {
+        struct drm_device *dev = state->dev;
+        struct drm_i915_private *dev_priv = to_i915(dev);
+        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
         struct intel_crtc *intel_crtc;
-        struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+        struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+        unsigned realloc_pipes = dev_priv->active_crtcs;
+        int ret;
 
         /*
-         * If the WM update hasn't changed the allocation for this_crtc (the
-         * crtc we are currently computing the new WM values for), other
-         * enabled crtcs will keep the same allocation and we don't need to
-         * recompute anything for them.
+         * If this is our first atomic update following hardware readout,
+         * we can't trust the DDB that the BIOS programmed for us.  Let's
+         * pretend that all pipes switched active status so that we'll
+         * ensure a full DDB recompute.
          */
-        if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
-                return;
+        if (dev_priv->wm.distrust_bios_wm)
+                intel_state->active_pipe_changes = ~0;
 
         /*
-         * Otherwise, because of this_crtc being freshly enabled/disabled, the
-         * other active pipes need new DDB allocation and WM values.
+         * If the modeset changes which CRTC's are active, we need to
+         * recompute the DDB allocation for *all* active pipes, even
+         * those that weren't otherwise being modified in any way by this
+         * atomic commit.  Due to the shrinking of the per-pipe allocations
+         * when new active CRTC's are added, it's possible for a pipe that
+         * we were already using and aren't changing at all here to suddenly
+         * become invalid if its DDB needs exceeds its new allocation.
+         *
+         * Note that if we wind up doing a full DDB recompute, we can't let
+         * any other display updates race with this transaction, so we need
+         * to grab the lock on *all* CRTC's.
          */
-        for_each_intel_crtc(dev, intel_crtc) {
-                struct skl_pipe_wm pipe_wm = {};
-                bool wm_changed;
-
-                if (this_crtc->pipe == intel_crtc->pipe)
-                        continue;
-
-                if (!intel_crtc->active)
-                        continue;
-
-                wm_changed = skl_update_pipe_wm(&intel_crtc->base,
-                                                &r->ddb, &pipe_wm);
-
-                /*
-                 * If we end up re-computing the other pipe WM values, it's
-                 * because it was really needed, so we expect the WM values to
-                 * be different.
-                 */
-                WARN_ON(!wm_changed);
-
-                skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
-                r->dirty[intel_crtc->pipe] = true;
+        if (intel_state->active_pipe_changes) {
+                realloc_pipes = ~0;
+                intel_state->wm_results.dirty_pipes = ~0;
+        }
+
+        for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
+                struct intel_crtc_state *cstate;
+
+                cstate = intel_atomic_get_crtc_state(state, intel_crtc);
+                if (IS_ERR(cstate))
+                        return PTR_ERR(cstate);
+
+                ret = skl_allocate_pipe_ddb(cstate, ddb);
+                if (ret)
+                        return ret;
         }
+
+        return 0;
 }
 
-static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
+static int
+skl_compute_wm(struct drm_atomic_state *state)
 {
-        watermarks->wm_linetime[pipe] = 0;
-        memset(watermarks->plane[pipe], 0,
-               sizeof(uint32_t) * 8 * I915_MAX_PLANES);
-        memset(watermarks->plane_trans[pipe],
-               0, sizeof(uint32_t) * I915_MAX_PLANES);
-        watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
-
-        /* Clear ddb entries for pipe */
-        memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
-        memset(&watermarks->ddb.plane[pipe], 0,
-               sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
-        memset(&watermarks->ddb.y_plane[pipe], 0,
-               sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
-        memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
-               sizeof(struct skl_ddb_entry));
-
+        struct drm_crtc *crtc;
+        struct drm_crtc_state *cstate;
+        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+        struct skl_wm_values *results = &intel_state->wm_results;
+        struct skl_pipe_wm *pipe_wm;
+        bool changed = false;
+        int ret, i;
+
+        /*
+         * If this transaction isn't actually touching any CRTC's, don't
+         * bother with watermark calculation.  Note that if we pass this
+         * test, we're guaranteed to hold at least one CRTC state mutex,
+         * which means we can safely use values like dev_priv->active_crtcs
+         * since any racing commits that want to update them would need to
+         * hold _all_ CRTC state mutexes.
+         */
+        for_each_crtc_in_state(state, crtc, cstate, i)
+                changed = true;
+        if (!changed)
+                return 0;
+
+        /* Clear all dirty flags */
+        results->dirty_pipes = 0;
+
+        ret = skl_compute_ddb(state);
+        if (ret)
+                return ret;
+
+        /*
+         * Calculate WM's for all pipes that are part of this transaction.
+         * Note that the DDB allocation above may have added more CRTC's that
+         * weren't otherwise being modified (and set bits in dirty_pipes) if
+         * pipe allocations had to change.
+         *
+         * FIXME:  Now that we're doing this in the atomic check phase, we
+         * should allow skl_update_pipe_wm() to return failure in cases where
+         * no suitable watermark values can be found.
+         */
+        for_each_crtc_in_state(state, crtc, cstate, i) {
+                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+                struct intel_crtc_state *intel_cstate =
+                        to_intel_crtc_state(cstate);
+
+                pipe_wm = &intel_cstate->wm.skl.optimal;
+                ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
+                                         &changed);
+                if (ret)
+                        return ret;
+
+                if (changed)
+                        results->dirty_pipes |= drm_crtc_mask(crtc);
+
+                if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+                        /* This pipe's WM's did not change */
+                        continue;
+
+                intel_cstate->update_wm_pre = true;
+                skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
+        }
 
+        return 0;
 }
 
 static void skl_update_wm(struct drm_crtc *crtc)
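skl_compute_wm() is the new check-phase entry point: it validates the DDB split, computes every affected pipe's watermarks, and can fail the transaction cleanly. Presumably it is wired up as a display vfunc and called from the atomic check path; the hook name below matches the i915 code of this era but should be treated as an assumption here:

        /* In the SKL init path (assumption): */
        dev_priv->display.compute_global_watermarks = skl_compute_wm;

        /* And from intel_atomic_check() (sketch): */
        if (dev_priv->display.compute_global_watermarks) {
                ret = dev_priv->display.compute_global_watermarks(state);
                if (ret)
                        return ret;     /* reject the commit up front */
        }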
@@ -3678,26 +3981,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct skl_wm_values *results = &dev_priv->wm.skl_results;
         struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-        struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
-
+        struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
 
-        /* Clear all dirty flags */
-        memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
-
-        skl_clear_wm(results, intel_crtc->pipe);
-
-        if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
+        if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
                 return;
 
-        skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
-        results->dirty[intel_crtc->pipe] = true;
+        intel_crtc->wm.active.skl = *pipe_wm;
+
+        mutex_lock(&dev_priv->wm.wm_mutex);
 
-        skl_update_other_pipe_wm(dev, crtc, results);
         skl_write_wm_values(dev_priv, results);
         skl_flush_wm_values(dev_priv, results);
 
         /* store the new configuration */
         dev_priv->wm.skl_hw = *results;
+
+        mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
 static void ilk_compute_wm_config(struct drm_device *dev,
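skl_update_wm() thereby shrinks to a pure commit-phase publisher: all values were computed (and validated) during the atomic check, so the commit only copies them to hardware and cannot fail. In miniature:

        /* check phase (may fail):        commit phase (must not fail):
         *   skl_compute_wm(state)          skl_update_wm(crtc)
         *     -> intel_state->wm_results     -> write/flush under wm_mutex
         */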
@@ -3757,7 +4056,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
         struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 
         mutex_lock(&dev_priv->wm.wm_mutex);
-        intel_crtc->wm.active.ilk = cstate->wm.intermediate;
+        intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
         ilk_program_watermarks(dev_priv);
         mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -3769,7 +4068,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
 
         mutex_lock(&dev_priv->wm.wm_mutex);
         if (cstate->wm.need_postvbl_update) {
-                intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+                intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
                 ilk_program_watermarks(dev_priv);
         }
         mutex_unlock(&dev_priv->wm.wm_mutex);
@@ -3826,7 +4125,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
         struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
         struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-        struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
+        struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
         enum pipe pipe = intel_crtc->pipe;
         int level, i, max_level;
         uint32_t temp;
@@ -3849,7 +4148,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
         if (!intel_crtc->active)
                 return;
 
-        hw->dirty[pipe] = true;
+        hw->dirty_pipes |= drm_crtc_mask(crtc);
 
         active->linetime = hw->wm_linetime[pipe];
 
@@ -3883,6 +4182,14 @@ void skl_wm_get_hw_state(struct drm_device *dev)
         skl_ddb_get_hw_state(dev_priv, ddb);
         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                 skl_pipe_wm_get_hw_state(crtc);
+
+        if (dev_priv->active_crtcs) {
+                /* Fully recompute DDB on first atomic commit */
+                dev_priv->wm.distrust_bios_wm = true;
+        } else {
+                /* Easy/common case; just sanitize DDB now if everything off */
+                memset(ddb, 0, sizeof(*ddb));
+        }
 }
 
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
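The readout path now records whether the BIOS-programmed DDB can be trusted. The flag is consumed at the top of skl_compute_ddb() earlier in this diff, where it forces the first atomic commit to redistribute everything:

        if (dev_priv->wm.distrust_bios_wm)
                intel_state->active_pipe_changes = ~0;  /* full DDB recompute */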
@@ -3892,7 +4199,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
         struct ilk_wm_values *hw = &dev_priv->wm.hw;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
         struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-        struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
+        struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
         enum pipe pipe = intel_crtc->pipe;
         static const i915_reg_t wm0_pipe_reg[] = {
                 [PIPE_A] = WM0_PIPEA_ILK,
@@ -4169,9 +4476,8 @@ DEFINE_SPINLOCK(mchdev_lock);
  * mchdev_lock. */
 static struct drm_i915_private *i915_mch_dev;
 
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
+bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         u16 rgvswctl;
 
         assert_spin_locked(&mchdev_lock);
@@ -4193,9 +4499,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
         return true;
 }
 
-static void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         u32 rgvmodectl;
         u8 fmax, fmin, fstart, vstart;
 
@@ -4252,7 +4557,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
                 DRM_ERROR("stuck trying to change perf mode\n");
         mdelay(1);
 
-        ironlake_set_drps(dev, fstart);
+        ironlake_set_drps(dev_priv, fstart);
 
         dev_priv->ips.last_count1 = I915_READ(DMIEC) +
                 I915_READ(DDREC) + I915_READ(CSIEC);
@@ -4263,9 +4568,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
         spin_unlock_irq(&mchdev_lock);
 }
 
-static void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         u16 rgvswctl;
 
         spin_lock_irq(&mchdev_lock);
@@ -4280,7 +4584,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
         I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 
         /* Go back to the starting frequency */
-        ironlake_set_drps(dev, dev_priv->ips.fstart);
+        ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
         mdelay(1);
         rgvswctl |= MEMCTL_CMD_STS;
         I915_WRITE(MEMSWCTL, rgvswctl);
@@ -4424,12 +4728,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 /* gen6_set_rps is called to update the frequency request, but should also be
  * called when the range (min_delay and max_delay) is modified so that we can
  * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
         /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
-        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                 return;
 
         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4442,10 +4744,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
         if (val != dev_priv->rps.cur_freq) {
                 gen6_set_rps_thresholds(dev_priv, val);
 
-                if (IS_GEN9(dev))
+                if (IS_GEN9(dev_priv))
                         I915_WRITE(GEN6_RPNSWREQ,
                                    GEN9_FREQUENCY(val));
-                else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                         I915_WRITE(GEN6_RPNSWREQ,
                                    HSW_FREQUENCY(val));
                 else
@@ -4467,15 +4769,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
         trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
 
-static void valleyview_set_rps(struct drm_device *dev, u8 val)
+static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
         WARN_ON(val > dev_priv->rps.max_freq);
         WARN_ON(val < dev_priv->rps.min_freq);
 
-        if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+        if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
                       "Odd GPU freq value\n"))
                 val &= ~1;
 
@@ -4508,7 +4808,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
         /* Wake up the media well, as that takes a lot less
          * power than the Render well. */
         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
-        valleyview_set_rps(dev_priv->dev, val);
+        valleyview_set_rps(dev_priv, val);
         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
 }
 
@@ -4526,14 +4826,12 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
-        struct drm_device *dev = dev_priv->dev;
-
         mutex_lock(&dev_priv->rps.hw_lock);
         if (dev_priv->rps.enabled) {
-                if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                         vlv_set_rps_idle(dev_priv);
                 else
-                        gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+                        gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
                 dev_priv->rps.last_adj = 0;
                 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
         }
@@ -4581,49 +4879,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
         spin_unlock(&dev_priv->rps.client_lock);
 }
 
-void intel_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
-        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-                valleyview_set_rps(dev, val);
+        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                valleyview_set_rps(dev_priv, val);
         else
-                gen6_set_rps(dev, val);
+                gen6_set_rps(dev_priv, val);
 }
 
-static void gen9_disable_rc6(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
         I915_WRITE(GEN6_RC_CONTROL, 0);
         I915_WRITE(GEN9_PG_ENABLE, 0);
 }
 
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
         I915_WRITE(GEN6_RP_CONTROL, 0);
 }
 
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
         I915_WRITE(GEN6_RC_CONTROL, 0);
         I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
         I915_WRITE(GEN6_RP_CONTROL, 0);
 }
 
-static void cherryview_disable_rps(struct drm_device *dev)
+static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
         I915_WRITE(GEN6_RC_CONTROL, 0);
 }
 
-static void valleyview_disable_rps(struct drm_device *dev)
+static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
         /* we're doing forcewake before Disabling RC6,
          * This what the BIOS expects when going into suspend */
         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
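Everything from here down follows one mechanical pattern: helpers take struct drm_i915_private directly instead of receiving a struct drm_device and dereferencing dev->dev_private on entry, and the IS_*()/INTEL_INFO() macros accept dev_priv as well. In miniature:

        /* before */
        static void gen6_disable_rps(struct drm_device *dev)
        {
                struct drm_i915_private *dev_priv = dev->dev_private;

                I915_WRITE(GEN6_RC_CONTROL, 0);
        }

        /* after: one pointer hop fewer at every call site */
        static void gen6_disable_rps(struct drm_i915_private *dev_priv)
        {
                I915_WRITE(GEN6_RC_CONTROL, 0);
        }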
@@ -4633,15 +4921,15 @@ static void valleyview_disable_rps(struct drm_device *dev)
         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
 {
-        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
                         mode = GEN6_RC_CTL_RC6_ENABLE;
                 else
                         mode = 0;
         }
-        if (HAS_RC6p(dev))
+        if (HAS_RC6p(dev_priv))
                 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
                               onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
                               onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
@@ -4652,9 +4940,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
                               onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
 }
 
-static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
+static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = to_i915(dev);
         struct i915_ggtt *ggtt = &dev_priv->ggtt;
         bool enable_rc6 = true;
         unsigned long rc6_ctx_base;
@@ -4695,16 +4982,16 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
         return enable_rc6;
 }
 
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
 {
         /* No RC6 before Ironlake and code is gone for ilk. */
-        if (INTEL_INFO(dev)->gen < 6)
+        if (INTEL_INFO(dev_priv)->gen < 6)
                 return 0;
 
         if (!enable_rc6)
                 return 0;
 
-        if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
+        if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
                 DRM_INFO("RC6 disabled by BIOS\n");
                 return 0;
         }
@@ -4713,7 +5000,7 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
         if (enable_rc6 >= 0) {
                 int mask;
 
-                if (HAS_RC6p(dev))
+                if (HAS_RC6p(dev_priv))
                         mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
                                INTEL_RC6pp_ENABLE;
                 else
@@ -4726,20 +5013,14 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
                 return enable_rc6 & mask;
         }
 
-        if (IS_IVYBRIDGE(dev))
+        if (IS_IVYBRIDGE(dev_priv))
                 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 
         return INTEL_RC6_ENABLE;
 }
 
-int intel_enable_rc6(const struct drm_device *dev)
-{
-        return i915.enable_rc6;
-}
-
-static void gen6_init_rps_frequencies(struct drm_device *dev)
+static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         uint32_t rp_state_cap;
         u32 ddcc_status = 0;
         int ret;
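The trivial intel_enable_rc6(dev) wrapper is deleted and every caller now uses an argument-less intel_enable_rc6(). Its replacement isn't shown in this hunk; presumably it reduces to reading the module parameter directly, something like:

        /* Assumption about the new definition (not part of this hunk): */
        #define intel_enable_rc6()      (i915.enable_rc6)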
@@ -4747,7 +5028,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
         /* All of these values are in units of 50MHz */
         dev_priv->rps.cur_freq = 0;
         /* static values from HW: RP0 > RP1 > RPn (min_freq) */
-        if (IS_BROXTON(dev)) {
+        if (IS_BROXTON(dev_priv)) {
                 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
                 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
@@ -4763,8 +5044,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
         dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
         dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-        if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
-            IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+            IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                 ret = sandybridge_pcode_read(dev_priv,
                                              HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
                                              &ddcc_status);
@@ -4776,7 +5057,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
                                          dev_priv->rps.max_freq);
         }
 
-        if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                 /* Store the frequency values in 16.66 MHZ units, which is
                    the natural hardware unit for SKL */
                 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
@@ -4793,7 +5074,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
         dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
         if (dev_priv->rps.min_freq_softlimit == 0) {
-                if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                         dev_priv->rps.min_freq_softlimit =
                                 max_t(int, dev_priv->rps.efficient_freq,
                                       intel_freq_opcode(dev_priv, 450));
@@ -4804,16 +5085,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
 }
 
 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_device *dev)
+static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-        gen6_init_rps_frequencies(dev);
+        gen6_init_rps_frequencies(dev_priv);
 
         /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
-        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                 /*
                  * BIOS could leave the Hw Turbo enabled, so need to explicitly
                  * clear out the Control register just to avoid inconsitency
@@ -4823,7 +5102,7 @@ static void gen9_enable_rps(struct drm_device *dev)
                  * if the Turbo is left enabled in the Control register, as the
                  * Up/Down interrupts would remain masked.
                  */
-                gen9_disable_rps(dev);
+                gen9_disable_rps(dev_priv);
                 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
                 return;
         }
@@ -4842,14 +5121,13 @@ static void gen9_enable_rps(struct drm_device *dev)
          * Up/Down EI & threshold registers, as well as the RP_CONTROL,
          * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
         dev_priv->rps.power = HIGH_POWER; /* force a reset */
-        gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+        gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
 
         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void gen9_enable_rc6(struct drm_device *dev)
+static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *engine;
         uint32_t rc6_mask = 0;
 
@@ -4866,7 +5144,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
         /* 2b: Program RC6 thresholds.*/
 
         /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
-        if (IS_SKYLAKE(dev))
+        if (IS_SKYLAKE(dev_priv))
                 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
         else
                 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4875,7 +5153,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
         for_each_engine(engine, dev_priv)
                 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
-        if (HAS_GUC_UCODE(dev))
+        if (HAS_GUC(dev_priv))
                 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
 
         I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -4885,12 +5163,12 @@ static void gen9_enable_rc6(struct drm_device *dev)
         I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
 
         /* 3a: Enable RC6 */
-        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+        if (intel_enable_rc6() & INTEL_RC6_ENABLE)
                 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
         DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
         /* WaRsUseTimeoutMode */
-        if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-            IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+            IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
                 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
                            GEN7_RC_CTL_TO_MODE |
@@ -4906,19 +5184,17 @@ static void gen9_enable_rc6(struct drm_device *dev)
          * 3b: Enable Coarse Power Gating only when RC6 is enabled.
          * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
          */
-        if (NEEDS_WaRsDisableCoarsePowerGating(dev))
+        if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                 I915_WRITE(GEN9_PG_ENABLE, 0);
         else
                 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
                            (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
 
         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
 }
 
-static void gen8_enable_rps(struct drm_device *dev)
+static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *engine;
         uint32_t rc6_mask = 0;
 
@@ -4933,7 +5209,7 @@ static void gen8_enable_rps(struct drm_device *dev)
         I915_WRITE(GEN6_RC_CONTROL, 0);
 
         /* Initialize rps frequencies */
-        gen6_init_rps_frequencies(dev);
+        gen6_init_rps_frequencies(dev_priv);
 
         /* 2b: Program RC6 thresholds.*/
         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -4942,16 +5218,16 @@ static void gen8_enable_rps(struct drm_device *dev)
         for_each_engine(engine, dev_priv)
                 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
         I915_WRITE(GEN6_RC_SLEEP, 0);
-        if (IS_BROADWELL(dev))
+        if (IS_BROADWELL(dev_priv))
                 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
         else
                 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
         /* 3: Enable RC6 */
-        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+        if (intel_enable_rc6() & INTEL_RC6_ENABLE)
                 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-        intel_print_rc6_info(dev, rc6_mask);
-        if (IS_BROADWELL(dev))
+        intel_print_rc6_info(dev_priv, rc6_mask);
+        if (IS_BROADWELL(dev_priv))
                 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
                            GEN7_RC_CTL_TO_MODE |
                            rc6_mask);
@@ -4992,14 +5268,13 @@ static void gen8_enable_rps(struct drm_device *dev)
         /* 6: Ring frequency + overclocking (our driver does this later */
 
         dev_priv->rps.power = HIGH_POWER; /* force a reset */
-        gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+        gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
 
         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void gen6_enable_rps(struct drm_device *dev)
+static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *engine;
         u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
         u32 gtfifodbg;
@@ -5026,7 +5301,7 @@ static void gen6_enable_rps(struct drm_device *dev)
         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
         /* Initialize rps frequencies */
-        gen6_init_rps_frequencies(dev);
+        gen6_init_rps_frequencies(dev_priv);
 
         /* disable the counters and set deterministic thresholds */
         I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -5042,7 +5317,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 
         I915_WRITE(GEN6_RC_SLEEP, 0);
         I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-        if (IS_IVYBRIDGE(dev))
+        if (IS_IVYBRIDGE(dev_priv))
                 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
         else
                 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -5050,12 +5325,12 @@ static void gen6_enable_rps(struct drm_device *dev)
         I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
         /* Check if we are enabling RC6 */
-        rc6_mode = intel_enable_rc6(dev_priv->dev);
+        rc6_mode = intel_enable_rc6();
         if (rc6_mode & INTEL_RC6_ENABLE)
                 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
 
         /* We don't use those on Haswell */
-        if (!IS_HASWELL(dev)) {
+        if (!IS_HASWELL(dev_priv)) {
                 if (rc6_mode & INTEL_RC6p_ENABLE)
                         rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
 
@@ -5063,7 +5338,7 @@ static void gen6_enable_rps(struct drm_device *dev)
                         rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
         }
 
-        intel_print_rc6_info(dev, rc6_mask);
+        intel_print_rc6_info(dev_priv, rc6_mask);
 
         I915_WRITE(GEN6_RC_CONTROL,
                    rc6_mask |
@@ -5087,13 +5362,13 @@ static void gen6_enable_rps(struct drm_device *dev)
         }
 
         dev_priv->rps.power = HIGH_POWER; /* force a reset */
-        gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+        gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
 
         rc6vids = 0;
         ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
-        if (IS_GEN6(dev) && ret) {
+        if (IS_GEN6(dev_priv) && ret) {
                 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
-        } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+        } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
                 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
                                  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
                 rc6vids &= 0xffff00;
@@ -5106,9 +5381,8 @@ static void gen6_enable_rps(struct drm_device *dev)
         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void __gen6_update_ring_freq(struct drm_device *dev)
+static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         int min_freq = 15;
         unsigned int gpu_freq;
         unsigned int max_ia_freq, min_ring_freq;
@@ -5137,7 +5411,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
         /* convert DDR frequency from units of 266.6MHz to bandwidth */
         min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
-        if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                 /* Convert GT frequency to 50 HZ units */
                 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
                 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5155,16 +5429,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
                 int diff = max_gpu_freq - gpu_freq;
                 unsigned int ia_freq = 0, ring_freq = 0;
 
-                if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+                if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                         /*
                          * ring_freq = 2 * GT. ring_freq is in 100MHz units
                          * No floor required for ring frequency on SKL.
                          */
                         ring_freq = gpu_freq;
-                } else if (INTEL_INFO(dev)->gen >= 8) {
+                } else if (INTEL_INFO(dev_priv)->gen >= 8) {
                         /* max(2 * GT, DDR). NB: GT is 50MHz units */
                         ring_freq = max(min_ring_freq, gpu_freq);
-                } else if (IS_HASWELL(dev)) {
+                } else if (IS_HASWELL(dev_priv)) {
                         ring_freq = mult_frac(gpu_freq, 5, 4);
                         ring_freq = max(min_ring_freq, ring_freq);
                         /* leave ia_freq as the default, chosen by cpufreq */
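For context on what this loop feeds: each (gpu_freq, ring_freq, ia_freq) triple computed above is handed to the PCU one GT frequency step at a time. The write below appears at the tail of the same loop elsewhere in the file; the exact field layout is taken on trust from the register macros:

        sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
                                ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
                                ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
                                gpu_freq);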
@@ -5191,26 +5465,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
         }
 }
 
-void gen6_update_ring_freq(struct drm_device *dev)
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
-        if (!HAS_CORE_RING_FREQ(dev))
+        if (!HAS_CORE_RING_FREQ(dev_priv))
                 return;
 
         mutex_lock(&dev_priv->rps.hw_lock);
-        __gen6_update_ring_freq(dev);
+        __gen6_update_ring_freq(dev_priv);
         mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
-        struct drm_device *dev = dev_priv->dev;
         u32 val, rp0;
 
         val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
-        switch (INTEL_INFO(dev)->eu_total) {
+        switch (INTEL_INFO(dev_priv)->eu_total) {
         case 8:
                 /* (2 * 4) config */
                 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5321,9 +5592,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
         WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
 }
 
-static void cherryview_setup_pctx(struct drm_device *dev)
+static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = to_i915(dev);
         struct i915_ggtt *ggtt = &dev_priv->ggtt;
         unsigned long pctx_paddr, paddr;
         u32 pcbr;
@@ -5342,15 +5612,14 @@ static void cherryview_setup_pctx(struct drm_device *dev)
         DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 }
 
-static void valleyview_setup_pctx(struct drm_device *dev)
+static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *pctx;
         unsigned long pctx_paddr;
         u32 pcbr;
         int pctx_size = 24*1024;
 
-        mutex_lock(&dev->struct_mutex);
+        mutex_lock(&dev_priv->dev->struct_mutex);
 
         pcbr = I915_READ(VLV_PCBR);
         if (pcbr) {
@@ -5375,7 +5644,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
          * overlap with other ranges, such as the frame buffer, protected
          * memory, or any other relevant ranges.
          */
-        pctx = i915_gem_object_create_stolen(dev, pctx_size);
+        pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
         if (!pctx) {
                 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
                 goto out;
@@ -5387,13 +5656,11 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 out:
         DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
         dev_priv->vlv_pctx = pctx;
-        mutex_unlock(&dev->struct_mutex);
+        mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-static void valleyview_cleanup_pctx(struct drm_device *dev)
+static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
-
         if (WARN_ON(!dev_priv->vlv_pctx))
                 return;
 
@@ -5412,12 +5679,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
                          dev_priv->rps.gpll_ref_freq);
 }
 
-static void valleyview_init_gt_powersave(struct drm_device *dev)
+static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         u32 val;
 
-        valleyview_setup_pctx(dev);
+        valleyview_setup_pctx(dev_priv);
 
         vlv_init_gpll_ref_freq(dev_priv);
 
@@ -5471,12 +5737,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
         mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-static void cherryview_init_gt_powersave(struct drm_device *dev)
+static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         u32 val;
 
-        cherryview_setup_pctx(dev);
+        cherryview_setup_pctx(dev_priv);
 
         vlv_init_gpll_ref_freq(dev_priv);
 
@@ -5536,14 +5801,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
         mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
 {
-        valleyview_cleanup_pctx(dev);
+        valleyview_cleanup_pctx(dev_priv);
 }
 
-static void cherryview_enable_rps(struct drm_device *dev)
+static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *engine;
         u32 gtfifodbg, val, rc6_mode = 0, pcbr;
 
@@ -5588,8 +5852,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
         pcbr = I915_READ(VLV_PCBR);
 
         /* 3: Enable RC6 */
-        if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+        if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
             (pcbr >> VLV_PCBR_ADDR_SHIFT))
                 rc6_mode = GEN7_RC_CTL_TO_MODE;
 
         I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5634,14 +5898,13 @@ static void cherryview_enable_rps(struct drm_device *dev)
                          intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
                          dev_priv->rps.idle_freq);
 
-        valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+        valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
 
         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void valleyview_enable_rps(struct drm_device *dev)
+static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *engine;
         u32 gtfifodbg, val, rc6_mode = 0;
 
@@ -5694,10 +5957,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
5694 VLV_MEDIA_RC6_COUNT_EN | 5957 VLV_MEDIA_RC6_COUNT_EN |
5695 VLV_RENDER_RC6_COUNT_EN)); 5958 VLV_RENDER_RC6_COUNT_EN));
5696 5959
5697 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5960 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5698 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 5961 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5699 5962
5700 intel_print_rc6_info(dev, rc6_mode); 5963 intel_print_rc6_info(dev_priv, rc6_mode);
5701 5964
5702 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5965 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5703 5966
@@ -5724,7 +5987,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
5724 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), 5987 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
5725 dev_priv->rps.idle_freq); 5988 dev_priv->rps.idle_freq);
5726 5989
5727 valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5990 valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
5728 5991
5729 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5992 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5730} 5993}
@@ -5814,10 +6077,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5814 6077
5815unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 6078unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5816{ 6079{
5817 struct drm_device *dev = dev_priv->dev;
5818 unsigned long val; 6080 unsigned long val;
5819 6081
5820 if (INTEL_INFO(dev)->gen != 5) 6082 if (INTEL_INFO(dev_priv)->gen != 5)
5821 return 0; 6083 return 0;
5822 6084
5823 spin_lock_irq(&mchdev_lock); 6085 spin_lock_irq(&mchdev_lock);
@@ -5857,11 +6119,10 @@ static int _pxvid_to_vd(u8 pxvid)
5857 6119
5858static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 6120static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5859{ 6121{
5860 struct drm_device *dev = dev_priv->dev;
5861 const int vd = _pxvid_to_vd(pxvid); 6122 const int vd = _pxvid_to_vd(pxvid);
5862 const int vm = vd - 1125; 6123 const int vm = vd - 1125;
5863 6124
5864 if (INTEL_INFO(dev)->is_mobile) 6125 if (INTEL_INFO(dev_priv)->is_mobile)
5865 return vm > 0 ? vm : 0; 6126 return vm > 0 ? vm : 0;
5866 6127
5867 return vd; 6128 return vd;
@@ -5902,9 +6163,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5902 6163
5903void i915_update_gfx_val(struct drm_i915_private *dev_priv) 6164void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5904{ 6165{
5905 struct drm_device *dev = dev_priv->dev; 6166 if (INTEL_INFO(dev_priv)->gen != 5)
5906
5907 if (INTEL_INFO(dev)->gen != 5)
5908 return; 6167 return;
5909 6168
5910 spin_lock_irq(&mchdev_lock); 6169 spin_lock_irq(&mchdev_lock);
@@ -5953,10 +6212,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5953 6212
5954unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 6213unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5955{ 6214{
5956 struct drm_device *dev = dev_priv->dev;
5957 unsigned long val; 6215 unsigned long val;
5958 6216
5959 if (INTEL_INFO(dev)->gen != 5) 6217 if (INTEL_INFO(dev_priv)->gen != 5)
5960 return 0; 6218 return 0;
5961 6219
5962 spin_lock_irq(&mchdev_lock); 6220 spin_lock_irq(&mchdev_lock);
@@ -6097,7 +6355,7 @@ bool i915_gpu_turbo_disable(void)
6097 6355
6098 dev_priv->ips.max_delay = dev_priv->ips.fstart; 6356 dev_priv->ips.max_delay = dev_priv->ips.fstart;
6099 6357
6100 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) 6358 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
6101 ret = false; 6359 ret = false;
6102 6360
6103out_unlock: 6361out_unlock:
@@ -6145,9 +6403,8 @@ void intel_gpu_ips_teardown(void)
6145 spin_unlock_irq(&mchdev_lock); 6403 spin_unlock_irq(&mchdev_lock);
6146} 6404}
6147 6405
6148static void intel_init_emon(struct drm_device *dev) 6406static void intel_init_emon(struct drm_i915_private *dev_priv)
6149{ 6407{
6150 struct drm_i915_private *dev_priv = dev->dev_private;
6151 u32 lcfuse; 6408 u32 lcfuse;
6152 u8 pxw[16]; 6409 u8 pxw[16];
6153 int i; 6410 int i;
@@ -6216,10 +6473,8 @@ static void intel_init_emon(struct drm_device *dev)
6216 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 6473 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6217} 6474}
6218 6475
6219void intel_init_gt_powersave(struct drm_device *dev) 6476void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
6220{ 6477{
6221 struct drm_i915_private *dev_priv = dev->dev_private;
6222
6223 /* 6478 /*
6224 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a 6479
6225 * requirement. 6480
@@ -6229,74 +6484,66 @@ void intel_init_gt_powersave(struct drm_device *dev)
6229 intel_runtime_pm_get(dev_priv); 6484 intel_runtime_pm_get(dev_priv);
6230 } 6485 }
6231 6486
6232 if (IS_CHERRYVIEW(dev)) 6487 if (IS_CHERRYVIEW(dev_priv))
6233 cherryview_init_gt_powersave(dev); 6488 cherryview_init_gt_powersave(dev_priv);
6234 else if (IS_VALLEYVIEW(dev)) 6489 else if (IS_VALLEYVIEW(dev_priv))
6235 valleyview_init_gt_powersave(dev); 6490 valleyview_init_gt_powersave(dev_priv);
6236} 6491}
6237 6492
6238void intel_cleanup_gt_powersave(struct drm_device *dev) 6493void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6239{ 6494{
6240 struct drm_i915_private *dev_priv = dev->dev_private; 6495 if (IS_CHERRYVIEW(dev_priv))
6241
6242 if (IS_CHERRYVIEW(dev))
6243 return; 6496 return;
6244 else if (IS_VALLEYVIEW(dev)) 6497 else if (IS_VALLEYVIEW(dev_priv))
6245 valleyview_cleanup_gt_powersave(dev); 6498 valleyview_cleanup_gt_powersave(dev_priv);
6246 6499
6247 if (!i915.enable_rc6) 6500 if (!i915.enable_rc6)
6248 intel_runtime_pm_put(dev_priv); 6501 intel_runtime_pm_put(dev_priv);
6249} 6502}
6250 6503
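The init/cleanup pair above also shows how RC6 is tied to runtime PM: when the i915.enable_rc6 parameter is off, init takes a runtime-PM reference that cleanup later drops, so the device can never runtime-suspend without RC6 available to save/restore the GT context. A runnable sketch of that flag-paired get/put discipline, with rpm_get()/rpm_put() as stand-ins for intel_runtime_pm_get()/_put():

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for intel_runtime_pm_get()/_put(); hypothetical here. */
static int rpm_refcount;
static void rpm_get(void) { rpm_refcount++; }
static void rpm_put(void) { rpm_refcount--; }

/* When RC6 is off, hold a PM reference for the driver's lifetime. */
static void gt_powersave_init(bool rc6_enabled)
{
	if (!rc6_enabled)
		rpm_get();
}

static void gt_powersave_cleanup(bool rc6_enabled)
{
	if (!rc6_enabled)
		rpm_put();	/* must mirror the condition used at init */
}

int main(void)
{
	gt_powersave_init(false);
	gt_powersave_cleanup(false);
	printf("refcount balanced: %d\n", rpm_refcount); /* 0 */
	return 0;
}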
6251static void gen6_suspend_rps(struct drm_device *dev) 6504static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
6252{ 6505{
6253 struct drm_i915_private *dev_priv = dev->dev_private;
6254
6255 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 6506 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6256 6507
6257 gen6_disable_rps_interrupts(dev); 6508 gen6_disable_rps_interrupts(dev_priv);
6258} 6509}
6259 6510
6260/** 6511/**
6261 * intel_suspend_gt_powersave - suspend PM work and helper threads 6512
6262 * @dev: drm device 6513 * @dev_priv: i915 device
6263 * 6514
6264 * We don't want to disable RC6 or other features here; we just want 6515
6265 * to make sure any work we've queued has finished and won't bother 6516
6266 * us while we're suspended. 6517
6267 */ 6518
6268void intel_suspend_gt_powersave(struct drm_device *dev) 6519void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
6269{ 6520{
6270 struct drm_i915_private *dev_priv = dev->dev_private; 6521 if (INTEL_GEN(dev_priv) < 6)
6271
6272 if (INTEL_INFO(dev)->gen < 6)
6273 return; 6522 return;
6274 6523
6275 gen6_suspend_rps(dev); 6524 gen6_suspend_rps(dev_priv);
6276 6525
6277 /* Force GPU to min freq during suspend */ 6526 /* Force GPU to min freq during suspend */
6278 gen6_rps_idle(dev_priv); 6527 gen6_rps_idle(dev_priv);
6279} 6528}
6280 6529
6281void intel_disable_gt_powersave(struct drm_device *dev) 6530void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
6282{ 6531{
6283 struct drm_i915_private *dev_priv = dev->dev_private; 6532 if (IS_IRONLAKE_M(dev_priv)) {
6284 6533 ironlake_disable_drps(dev_priv);
6285 if (IS_IRONLAKE_M(dev)) { 6534 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6286 ironlake_disable_drps(dev); 6535 intel_suspend_gt_powersave(dev_priv);
6287 } else if (INTEL_INFO(dev)->gen >= 6) {
6288 intel_suspend_gt_powersave(dev);
6289 6536
6290 mutex_lock(&dev_priv->rps.hw_lock); 6537 mutex_lock(&dev_priv->rps.hw_lock);
6291 if (INTEL_INFO(dev)->gen >= 9) { 6538 if (INTEL_INFO(dev_priv)->gen >= 9) {
6292 gen9_disable_rc6(dev); 6539 gen9_disable_rc6(dev_priv);
6293 gen9_disable_rps(dev); 6540 gen9_disable_rps(dev_priv);
6294 } else if (IS_CHERRYVIEW(dev)) 6541 } else if (IS_CHERRYVIEW(dev_priv))
6295 cherryview_disable_rps(dev); 6542 cherryview_disable_rps(dev_priv);
6296 else if (IS_VALLEYVIEW(dev)) 6543 else if (IS_VALLEYVIEW(dev_priv))
6297 valleyview_disable_rps(dev); 6544 valleyview_disable_rps(dev_priv);
6298 else 6545 else
6299 gen6_disable_rps(dev); 6546 gen6_disable_rps(dev_priv);
6300 6547
6301 dev_priv->rps.enabled = false; 6548 dev_priv->rps.enabled = false;
6302 mutex_unlock(&dev_priv->rps.hw_lock); 6549 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -6308,27 +6555,26 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6308 struct drm_i915_private *dev_priv = 6555 struct drm_i915_private *dev_priv =
6309 container_of(work, struct drm_i915_private, 6556 container_of(work, struct drm_i915_private,
6310 rps.delayed_resume_work.work); 6557 rps.delayed_resume_work.work);
6311 struct drm_device *dev = dev_priv->dev;
6312 6558
6313 mutex_lock(&dev_priv->rps.hw_lock); 6559 mutex_lock(&dev_priv->rps.hw_lock);
6314 6560
6315 gen6_reset_rps_interrupts(dev); 6561 gen6_reset_rps_interrupts(dev_priv);
6316 6562
6317 if (IS_CHERRYVIEW(dev)) { 6563 if (IS_CHERRYVIEW(dev_priv)) {
6318 cherryview_enable_rps(dev); 6564 cherryview_enable_rps(dev_priv);
6319 } else if (IS_VALLEYVIEW(dev)) { 6565 } else if (IS_VALLEYVIEW(dev_priv)) {
6320 valleyview_enable_rps(dev); 6566 valleyview_enable_rps(dev_priv);
6321 } else if (INTEL_INFO(dev)->gen >= 9) { 6567 } else if (INTEL_INFO(dev_priv)->gen >= 9) {
6322 gen9_enable_rc6(dev); 6568 gen9_enable_rc6(dev_priv);
6323 gen9_enable_rps(dev); 6569 gen9_enable_rps(dev_priv);
6324 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 6570 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
6325 __gen6_update_ring_freq(dev); 6571 __gen6_update_ring_freq(dev_priv);
6326 } else if (IS_BROADWELL(dev)) { 6572 } else if (IS_BROADWELL(dev_priv)) {
6327 gen8_enable_rps(dev); 6573 gen8_enable_rps(dev_priv);
6328 __gen6_update_ring_freq(dev); 6574 __gen6_update_ring_freq(dev_priv);
6329 } else { 6575 } else {
6330 gen6_enable_rps(dev); 6576 gen6_enable_rps(dev_priv);
6331 __gen6_update_ring_freq(dev); 6577 __gen6_update_ring_freq(dev_priv);
6332 } 6578 }
6333 6579
6334 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 6580 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6339,27 +6585,25 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6339 6585
6340 dev_priv->rps.enabled = true; 6586 dev_priv->rps.enabled = true;
6341 6587
6342 gen6_enable_rps_interrupts(dev); 6588 gen6_enable_rps_interrupts(dev_priv);
6343 6589
6344 mutex_unlock(&dev_priv->rps.hw_lock); 6590 mutex_unlock(&dev_priv->rps.hw_lock);
6345 6591
6346 intel_runtime_pm_put(dev_priv); 6592 intel_runtime_pm_put(dev_priv);
6347} 6593}
6348 6594
6349void intel_enable_gt_powersave(struct drm_device *dev) 6595void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6350{ 6596{
6351 struct drm_i915_private *dev_priv = dev->dev_private;
6352
6353 /* Powersaving is controlled by the host when inside a VM */ 6597 /* Powersaving is controlled by the host when inside a VM */
6354 if (intel_vgpu_active(dev)) 6598 if (intel_vgpu_active(dev_priv))
6355 return; 6599 return;
6356 6600
6357 if (IS_IRONLAKE_M(dev)) { 6601 if (IS_IRONLAKE_M(dev_priv)) {
6358 ironlake_enable_drps(dev); 6602 ironlake_enable_drps(dev_priv);
6359 mutex_lock(&dev->struct_mutex); 6603 mutex_lock(&dev_priv->dev->struct_mutex);
6360 intel_init_emon(dev); 6604 intel_init_emon(dev_priv);
6361 mutex_unlock(&dev->struct_mutex); 6605 mutex_unlock(&dev_priv->dev->struct_mutex);
6362 } else if (INTEL_INFO(dev)->gen >= 6) { 6606 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6363 /* 6607 /*
6364 * PCU communication is slow and this doesn't need to be 6608 * PCU communication is slow and this doesn't need to be
6365 * done at any specific time, so do this out of our fast path 6609 * done at any specific time, so do this out of our fast path
@@ -6378,14 +6622,12 @@ void intel_enable_gt_powersave(struct drm_device *dev)
6378 } 6622 }
6379} 6623}
6380 6624
6381void intel_reset_gt_powersave(struct drm_device *dev) 6625void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
6382{ 6626{
6383 struct drm_i915_private *dev_priv = dev->dev_private; 6627 if (INTEL_INFO(dev_priv)->gen < 6)
6384
6385 if (INTEL_INFO(dev)->gen < 6)
6386 return; 6628 return;
6387 6629
6388 gen6_suspend_rps(dev); 6630 gen6_suspend_rps(dev_priv);
6389 dev_priv->rps.enabled = false; 6631 dev_priv->rps.enabled = false;
6390} 6632}
6391 6633
@@ -6698,11 +6940,42 @@ static void lpt_suspend_hw(struct drm_device *dev)
6698 } 6940 }
6699} 6941}
6700 6942
6943static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
6944 int general_prio_credits,
6945 int high_prio_credits)
6946{
6947 u32 misccpctl;
6948
6949 /* WaTempDisableDOPClkGating:bdw */
6950 misccpctl = I915_READ(GEN7_MISCCPCTL);
6951 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6952
6953 I915_WRITE(GEN8_L3SQCREG1,
6954 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
6955 L3_HIGH_PRIO_CREDITS(high_prio_credits));
6956
6957 /*
6958 * Wait at least 100 clocks before re-enabling clock gating.
6959 * See the definition of L3SQCREG1 in BSpec.
6960 */
6961 POSTING_READ(GEN8_L3SQCREG1);
6962 udelay(1);
6963 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6964}
6965
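gen8_set_l3sqc_credits() above factors out a bracket that several platforms repeat: quiesce DOP clock gating, program the L3 SQC credit fields, force the write to land, then restore gating. In this series Broadwell calls it with 30/2 and Cherryview with 38/2 (Broxton writes 62/2 directly in a later hunk). A compilable sketch of the same bracket against a toy register file; the <<19/<<14 field layout is an assumption for illustration, not taken from the real register definition:

#include <stdio.h>
#include <stdint.h>

/* Toy register file standing in for MMIO; names are illustrative. */
static uint32_t regs[2];
enum { MISCCPCTL, L3SQCREG1 };
#define DOP_CLOCK_GATE_ENABLE (1u << 0)

static uint32_t rd(int r) { return regs[r]; }
static void wr(int r, uint32_t v) { regs[r] = v; }

/* Same shape as gen8_set_l3sqc_credits(): disable DOP clock gating,
 * program the credits, let the write land, then restore gating. */
static void set_l3sqc_credits(uint32_t general, uint32_t high)
{
	uint32_t saved = rd(MISCCPCTL);

	wr(MISCCPCTL, saved & ~DOP_CLOCK_GATE_ENABLE);
	wr(L3SQCREG1, (general << 19) | (high << 14)); /* field layout assumed */
	(void)rd(L3SQCREG1); /* posting read; the real code also waits ~100 clocks */
	wr(MISCCPCTL, saved);
}

int main(void)
{
	set_l3sqc_credits(30, 2);	/* bdw values from this patch */
	printf("L3SQCREG1 = %#x\n", (unsigned)rd(L3SQCREG1));
	return 0;
}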
6966static void skylake_init_clock_gating(struct drm_device *dev)
6967{
6968 struct drm_i915_private *dev_priv = dev->dev_private;
6969
6970 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,kbl */
6971 I915_WRITE(CHICKEN_PAR1_1,
6972 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
6973}
6974
6701static void broadwell_init_clock_gating(struct drm_device *dev) 6975static void broadwell_init_clock_gating(struct drm_device *dev)
6702{ 6976{
6703 struct drm_i915_private *dev_priv = dev->dev_private; 6977 struct drm_i915_private *dev_priv = dev->dev_private;
6704 enum pipe pipe; 6978 enum pipe pipe;
6705 uint32_t misccpctl;
6706 6979
6707 ilk_init_lp_watermarks(dev); 6980 ilk_init_lp_watermarks(dev);
6708 6981
@@ -6733,20 +7006,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
6733 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7006 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6734 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7007 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6735 7008
6736 /* 7009 /* WaProgramL3SqcReg1Default:bdw */
6737 * WaProgramL3SqcReg1Default:bdw 7010 gen8_set_l3sqc_credits(dev_priv, 30, 2);
6738 * WaTempDisableDOPClkGating:bdw
6739 */
6740 misccpctl = I915_READ(GEN7_MISCCPCTL);
6741 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6742 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
6743 /*
6744 * Wait at least 100 clocks before re-enabling clock gating. See
6745 * the definition of L3SQCREG1 in BSpec.
6746 */
6747 POSTING_READ(GEN8_L3SQCREG1);
6748 udelay(1);
6749 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6750 7011
6751 /* 7012 /*
6752 * WaGttCachingOffByDefault:bdw 7013 * WaGttCachingOffByDefault:bdw
@@ -7017,6 +7278,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
7017 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7278 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7018 7279
7019 /* 7280 /*
7281 * WaProgramL3SqcReg1Default:chv
7282 * See gfxspecs/Related Documents/Performance Guide/
7283 * LSQC Setting Recommendations.
7284 */
7285 gen8_set_l3sqc_credits(dev_priv, 38, 2);
7286
7287 /*
7020 * GTT cache may not work with big pages, so if those 7288 * GTT cache may not work with big pages, so if those
7021 * are ever enabled GTT cache may need to be disabled. 7289 * are ever enabled GTT cache may need to be disabled.
7022 */ 7290 */
@@ -7163,9 +7431,9 @@ static void nop_init_clock_gating(struct drm_device *dev)
7163void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) 7431void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7164{ 7432{
7165 if (IS_SKYLAKE(dev_priv)) 7433 if (IS_SKYLAKE(dev_priv))
7166 dev_priv->display.init_clock_gating = nop_init_clock_gating; 7434 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7167 else if (IS_KABYLAKE(dev_priv)) 7435 else if (IS_KABYLAKE(dev_priv))
7168 dev_priv->display.init_clock_gating = nop_init_clock_gating; 7436 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7169 else if (IS_BROXTON(dev_priv)) 7437 else if (IS_BROXTON(dev_priv))
7170 dev_priv->display.init_clock_gating = bxt_init_clock_gating; 7438 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7171 else if (IS_BROADWELL(dev_priv)) 7439 else if (IS_BROADWELL(dev_priv))
@@ -7217,6 +7485,7 @@ void intel_init_pm(struct drm_device *dev)
7217 if (INTEL_INFO(dev)->gen >= 9) { 7485 if (INTEL_INFO(dev)->gen >= 9) {
7218 skl_setup_wm_latency(dev); 7486 skl_setup_wm_latency(dev);
7219 dev_priv->display.update_wm = skl_update_wm; 7487 dev_priv->display.update_wm = skl_update_wm;
7488 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7220 } else if (HAS_PCH_SPLIT(dev)) { 7489 } else if (HAS_PCH_SPLIT(dev)) {
7221 ilk_setup_wm_latency(dev); 7490 ilk_setup_wm_latency(dev);
7222 7491
@@ -7390,19 +7659,17 @@ static void __intel_rps_boost_work(struct work_struct *work)
7390 struct drm_i915_gem_request *req = boost->req; 7659 struct drm_i915_gem_request *req = boost->req;
7391 7660
7392 if (!i915_gem_request_completed(req, true)) 7661 if (!i915_gem_request_completed(req, true))
7393 gen6_rps_boost(to_i915(req->engine->dev), NULL, 7662 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
7394 req->emitted_jiffies);
7395 7663
7396 i915_gem_request_unreference__unlocked(req); 7664 i915_gem_request_unreference(req);
7397 kfree(boost); 7665 kfree(boost);
7398} 7666}
7399 7667
7400void intel_queue_rps_boost_for_request(struct drm_device *dev, 7668void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
7401 struct drm_i915_gem_request *req)
7402{ 7669{
7403 struct request_boost *boost; 7670 struct request_boost *boost;
7404 7671
7405 if (req == NULL || INTEL_INFO(dev)->gen < 6) 7672 if (req == NULL || INTEL_GEN(req->i915) < 6)
7406 return; 7673 return;
7407 7674
7408 if (i915_gem_request_completed(req, true)) 7675 if (i915_gem_request_completed(req, true))
@@ -7416,7 +7683,7 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
7416 boost->req = req; 7683 boost->req = req;
7417 7684
7418 INIT_WORK(&boost->work, __intel_rps_boost_work); 7685 INIT_WORK(&boost->work, __intel_rps_boost_work);
7419 queue_work(to_i915(dev)->wq, &boost->work); 7686 queue_work(req->i915->wq, &boost->work);
7420} 7687}
7421 7688
7422void intel_pm_setup(struct drm_device *dev) 7689void intel_pm_setup(struct drm_device *dev)
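The boost path above defers work to the driver workqueue: a small heap-allocated carrier bundles the work item with the request it boosts, and the worker recovers the carrier with container_of() before dropping the request reference and freeing it. A runnable userspace sketch of that carrier pattern, with work_struct reduced to a bare function pointer:

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of, as in the kernel. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { void (*func)(struct work_struct *); };

/* One heap-allocated carrier per boost, freed by the worker itself,
 * mirroring struct request_boost in the code above. */
struct request_boost {
	struct work_struct work;
	int req_id;		/* stands in for the request reference */
};

static void boost_work(struct work_struct *work)
{
	struct request_boost *boost =
		container_of(work, struct request_boost, work);
	printf("boosting for request %d\n", boost->req_id);
	/* real code: drop the request reference, then kfree(boost) */
}

int main(void)
{
	struct request_boost b = { { boost_work }, 42 };
	b.work.func(&b.work);	/* queue_work() would run this later */
	return 0;
}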
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a788d1e9589b..29a09bf6bd18 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -176,7 +176,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
176 struct drm_i915_private *dev_priv = dev->dev_private; 176 struct drm_i915_private *dev_priv = dev->dev_private;
177 uint32_t aux_clock_divider; 177 uint32_t aux_clock_divider;
178 i915_reg_t aux_ctl_reg; 178 i915_reg_t aux_ctl_reg;
179 int precharge = 0x3;
180 static const uint8_t aux_msg[] = { 179 static const uint8_t aux_msg[] = {
181 [0] = DP_AUX_NATIVE_WRITE << 4, 180 [0] = DP_AUX_NATIVE_WRITE << 4,
182 [1] = DP_SET_POWER >> 8, 181 [1] = DP_SET_POWER >> 8,
@@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
185 [4] = DP_SET_POWER_D0, 184 [4] = DP_SET_POWER_D0,
186 }; 185 };
187 enum port port = dig_port->port; 186 enum port port = dig_port->port;
187 u32 aux_ctl;
188 int i; 188 int i;
189 189
190 BUILD_BUG_ON(sizeof(aux_msg) > 20); 190 BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
197 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, 197 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
198 DP_AUX_FRAME_SYNC_ENABLE); 198 DP_AUX_FRAME_SYNC_ENABLE);
199 199
200 if (dev_priv->psr.link_standby)
201 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
202 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
203 else
204 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
205 DP_PSR_ENABLE);
206
200 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port); 207 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
201 208
202 /* Setup AUX registers */ 209 /* Setup AUX registers */
@@ -204,33 +211,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
204 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2), 211 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
205 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); 212 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
206 213
207 if (INTEL_INFO(dev)->gen >= 9) { 214 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
208 uint32_t val; 215 aux_clock_divider);
209 216 I915_WRITE(aux_ctl_reg, aux_ctl);
210 val = I915_READ(aux_ctl_reg);
211 val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
212 val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
213 val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
214 val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
215 /* Use hardcoded data values for PSR, frame sync and GTC */
216 val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
217 val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
218 val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
219 I915_WRITE(aux_ctl_reg, val);
220 } else {
221 I915_WRITE(aux_ctl_reg,
222 DP_AUX_CH_CTL_TIME_OUT_400us |
223 (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
224 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
225 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
226 }
227
228 if (dev_priv->psr.link_standby)
229 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
230 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
231 else
232 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
233 DP_PSR_ENABLE);
234} 217}
235 218
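The rewritten hunk above drops the open-coded gen >= 9 branch and instead reuses the AUX channel's own get_aux_send_ctl hook to build the AUX_CTL word, so PSR picks up whatever per-platform logic ordinary AUX transfers already use. A sketch of that function-pointer dispatch; the simplified two-argument hook shape and the bit values are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical shape of the per-platform hook this patch reuses:
 * one callback builds the AUX_CTL word, chosen once at init. */
struct dp_aux {
	uint32_t (*get_aux_send_ctl)(int send_bytes, uint32_t clock_div);
};

static uint32_t gen9_aux_ctl(int send_bytes, uint32_t clock_div)
{
	(void)clock_div;		/* no divider field assumed on gen9+ */
	return 0x80000000u | ((uint32_t)send_bytes << 20);
}

static uint32_t gen6_aux_ctl(int send_bytes, uint32_t clock_div)
{
	return 0x80000000u | ((uint32_t)send_bytes << 20) | clock_div;
}

int main(void)
{
	struct dp_aux aux = { gen9_aux_ctl };	/* selected at init time */

	printf("AUX_CTL = %#x\n", (unsigned)aux.get_aux_send_ctl(20, 0));
	aux.get_aux_send_ctl = gen6_aux_ctl;
	printf("AUX_CTL = %#x\n", (unsigned)aux.get_aux_send_ctl(20, 63));
	return 0;
}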
236static void vlv_psr_enable_source(struct intel_dp *intel_dp) 219static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -272,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
272 struct drm_i915_private *dev_priv = dev->dev_private; 255 struct drm_i915_private *dev_priv = dev->dev_private;
273 256
274 uint32_t max_sleep_time = 0x1f; 257 uint32_t max_sleep_time = 0x1f;
275 /* 258 /* It was recently identified that the panel idle frame count
276 * Let's respect VBT in case VBT asks a higher idle_frame value. 259 * calculated in HW can be off by one. So let's use the value
277 * Let's use 6 as the minimum to cover all known cases including 260 * that came from VBT + 1.
278 * the off-by-one issue that HW has in some cases. Also there are 261 * There are also cases where the panel demands at least 4 idle
279 * cases where sink should be able to train 262 * frames but VBT is not set. To cover both cases, let's use at
280 * with the 5 or 6 idle patterns. 263 * least 5 when VBT isn't set, to be on the safe side.
281 */ 264 */
282 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 265 uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
283 uint32_t val = EDP_PSR_ENABLE; 266 uint32_t val = EDP_PSR_ENABLE;
284 267
285 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; 268 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
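As a worked example of the policy in the new comment: with a VBT value of 6 the source programs 7 idle frames (VBT + 1 for the hardware off-by-one), and when VBT provides nothing a floor kicks in. The floor of 5 below is an interpretation of the comment's "at least 5" and is not part of the hunk shown:

#include <stdio.h>

/* Sketch of the idle-frame policy described above: trust VBT but add
 * one frame for the known off-by-one, and assume a floor when VBT
 * provides nothing. */
static unsigned int psr_idle_frames(unsigned int vbt_idle_frames)
{
	unsigned int idle = vbt_idle_frames + 1;

	return idle < 5 ? 5 : idle;
}

int main(void)
{
	printf("%u %u\n", psr_idle_frames(0), psr_idle_frames(6)); /* 5 7 */
	return 0;
}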
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04402bb9d26b..8d35a3978f9b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,11 @@
34#include "i915_trace.h" 34#include "i915_trace.h"
35#include "intel_drv.h" 35#include "intel_drv.h"
36 36
37/* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
39 */
40#define LEGACY_REQUEST_SIZE 200
41
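The estimate above is used to reserve ring space for a whole request before any of it is emitted, so a full ring is detected up front instead of halfway through writing commands. A minimal sketch of that reserve-then-emit discipline; the names and byte counts are illustrative, not the driver's API:

#include <stdio.h>

#define LEGACY_REQUEST_SIZE 200	/* rough upper bound, as above */

static int ring_bytes_free = 512;

/* Reserve the worst-case estimate before emitting anything. */
static int request_reserve(int bytes)
{
	if (ring_bytes_free < bytes)
		return -1;	/* caller waits for the ring to drain */
	ring_bytes_free -= bytes;
	return 0;
}

int main(void)
{
	if (request_reserve(LEGACY_REQUEST_SIZE) == 0)
		printf("reserved, %d bytes left\n", ring_bytes_free);
	return 0;
}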
37int __intel_ring_space(int head, int tail, int size) 42
38{ 43
39 int space = head - tail; 44 int space = head - tail;
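The arithmetic in __intel_ring_space() is plain circular-buffer math: free space is head minus tail, wrapped by the buffer size, minus a small reserve that keeps a completely full ring distinguishable from an empty one. A standalone version, with an assumed reserve of 16 bytes standing in for the driver's constant:

#include <stdio.h>

static int ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;	/* tail has wrapped past head */
	return space - 16;	/* reserve; placeholder value */
}

int main(void)
{
	printf("%d\n", ring_space(100, 200, 4096));	/* 3980 */
	printf("%d\n", ring_space(300, 200, 4096));	/* 84 */
	return 0;
}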
@@ -55,7 +60,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
55 60
56bool intel_engine_stopped(struct intel_engine_cs *engine) 61bool intel_engine_stopped(struct intel_engine_cs *engine)
57{ 62{
58 struct drm_i915_private *dev_priv = engine->dev->dev_private; 63 struct drm_i915_private *dev_priv = engine->i915;
59 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); 64 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
60} 65}
61 66
@@ -101,7 +106,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
101 u32 flush_domains) 106 u32 flush_domains)
102{ 107{
103 struct intel_engine_cs *engine = req->engine; 108 struct intel_engine_cs *engine = req->engine;
104 struct drm_device *dev = engine->dev;
105 u32 cmd; 109 u32 cmd;
106 int ret; 110 int ret;
107 111
@@ -140,7 +144,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
140 cmd |= MI_EXE_FLUSH; 144 cmd |= MI_EXE_FLUSH;
141 145
142 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && 146 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
143 (IS_G4X(dev) || IS_GEN5(dev))) 147 (IS_G4X(req->i915) || IS_GEN5(req->i915)))
144 cmd |= MI_INVALIDATE_ISP; 148 cmd |= MI_INVALIDATE_ISP;
145 149
146 ret = intel_ring_begin(req, 2); 150 ret = intel_ring_begin(req, 2);
@@ -426,19 +430,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
426static void ring_write_tail(struct intel_engine_cs *engine, 430static void ring_write_tail(struct intel_engine_cs *engine,
427 u32 value) 431 u32 value)
428{ 432{
429 struct drm_i915_private *dev_priv = engine->dev->dev_private; 433 struct drm_i915_private *dev_priv = engine->i915;
430 I915_WRITE_TAIL(engine, value); 434 I915_WRITE_TAIL(engine, value);
431} 435}
432 436
433u64 intel_ring_get_active_head(struct intel_engine_cs *engine) 437u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
434{ 438{
435 struct drm_i915_private *dev_priv = engine->dev->dev_private; 439 struct drm_i915_private *dev_priv = engine->i915;
436 u64 acthd; 440 u64 acthd;
437 441
438 if (INTEL_INFO(engine->dev)->gen >= 8) 442 if (INTEL_GEN(dev_priv) >= 8)
439 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), 443 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
440 RING_ACTHD_UDW(engine->mmio_base)); 444 RING_ACTHD_UDW(engine->mmio_base));
441 else if (INTEL_INFO(engine->dev)->gen >= 4) 445 else if (INTEL_GEN(dev_priv) >= 4)
442 acthd = I915_READ(RING_ACTHD(engine->mmio_base)); 446 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
443 else 447 else
444 acthd = I915_READ(ACTHD); 448 acthd = I915_READ(ACTHD);
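This hunk shows the conversion pattern the whole series applies: cache engine->i915 once and test INTEL_GEN(dev_priv) instead of chasing engine->dev->dev_private through INTEL_INFO(dev)->gen. Ranged checks such as the IS_GEN(dev_priv, 6, 7) used further down can be built from a per-gen bitmask; the sketch below is one plausible implementation, named IS_GEN_RANGE to avoid claiming it matches the real macro:

#include <stdio.h>

struct intel_device_info { int gen; };
struct drm_i915_private { struct intel_device_info info; };

#define INTEL_INFO(p)	(&(p)->info)
#define INTEL_GEN(p)	(INTEL_INFO(p)->gen)

/* Gens s..e inclusive as a mask test: bit (g-1) is set for gen g. */
#define GEN_MASK(s, e)	(((1u << (e)) - 1) & ~((1u << ((s) - 1)) - 1))
#define IS_GEN_RANGE(p, s, e) \
	(!!((1u << (INTEL_GEN(p) - 1)) & GEN_MASK(s, e)))

int main(void)
{
	struct drm_i915_private i915 = { { 7 } };

	printf("gen %d, in 6..7: %d, in 8..9: %d\n",
	       INTEL_GEN(&i915),
	       IS_GEN_RANGE(&i915, 6, 7),	/* 1 */
	       IS_GEN_RANGE(&i915, 8, 9));	/* 0 */
	return 0;
}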
@@ -448,25 +452,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
448 452
449static void ring_setup_phys_status_page(struct intel_engine_cs *engine) 453static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
450{ 454{
451 struct drm_i915_private *dev_priv = engine->dev->dev_private; 455 struct drm_i915_private *dev_priv = engine->i915;
452 u32 addr; 456 u32 addr;
453 457
454 addr = dev_priv->status_page_dmah->busaddr; 458 addr = dev_priv->status_page_dmah->busaddr;
455 if (INTEL_INFO(engine->dev)->gen >= 4) 459 if (INTEL_GEN(dev_priv) >= 4)
456 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 460 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
457 I915_WRITE(HWS_PGA, addr); 461 I915_WRITE(HWS_PGA, addr);
458} 462}
459 463
460static void intel_ring_setup_status_page(struct intel_engine_cs *engine) 464static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
461{ 465{
462 struct drm_device *dev = engine->dev; 466 struct drm_i915_private *dev_priv = engine->i915;
463 struct drm_i915_private *dev_priv = engine->dev->dev_private;
464 i915_reg_t mmio; 467 i915_reg_t mmio;
465 468
466 /* The ring status page addresses are no longer next to the rest of 469 /* The ring status page addresses are no longer next to the rest of
467 * the ring registers as of gen7. 470 * the ring registers as of gen7.
468 */ 471 */
469 if (IS_GEN7(dev)) { 472 if (IS_GEN7(dev_priv)) {
470 switch (engine->id) { 473 switch (engine->id) {
471 case RCS: 474 case RCS:
472 mmio = RENDER_HWS_PGA_GEN7; 475 mmio = RENDER_HWS_PGA_GEN7;
@@ -486,7 +489,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
486 mmio = VEBOX_HWS_PGA_GEN7; 489 mmio = VEBOX_HWS_PGA_GEN7;
487 break; 490 break;
488 } 491 }
489 } else if (IS_GEN6(engine->dev)) { 492 } else if (IS_GEN6(dev_priv)) {
490 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 493 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
491 } else { 494 } else {
492 /* XXX: gen8 returns to sanity */ 495 /* XXX: gen8 returns to sanity */
@@ -503,7 +506,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
503 * arises: do we still need this and if so how should we go about 506 * arises: do we still need this and if so how should we go about
504 * invalidating the TLB? 507 * invalidating the TLB?
505 */ 508 */
506 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { 509 if (IS_GEN(dev_priv, 6, 7)) {
507 i915_reg_t reg = RING_INSTPM(engine->mmio_base); 510 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
508 511
509 /* ring should be idle before issuing a sync flush */ 512
@@ -521,9 +524,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
521 524
522static bool stop_ring(struct intel_engine_cs *engine) 525static bool stop_ring(struct intel_engine_cs *engine)
523{ 526{
524 struct drm_i915_private *dev_priv = to_i915(engine->dev); 527 struct drm_i915_private *dev_priv = engine->i915;
525 528
526 if (!IS_GEN2(engine->dev)) { 529 if (!IS_GEN2(dev_priv)) {
527 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 530 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
528 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { 531 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
529 DRM_ERROR("%s : timed out trying to stop ring\n", 532 DRM_ERROR("%s : timed out trying to stop ring\n",
@@ -541,7 +544,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
541 I915_WRITE_HEAD(engine, 0); 544 I915_WRITE_HEAD(engine, 0);
542 engine->write_tail(engine, 0); 545 engine->write_tail(engine, 0);
543 546
544 if (!IS_GEN2(engine->dev)) { 547 if (!IS_GEN2(dev_priv)) {
545 (void)I915_READ_CTL(engine); 548 (void)I915_READ_CTL(engine);
546 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); 549 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
547 } 550 }
@@ -556,8 +559,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
556 559
557static int init_ring_common(struct intel_engine_cs *engine) 560static int init_ring_common(struct intel_engine_cs *engine)
558{ 561{
559 struct drm_device *dev = engine->dev; 562 struct drm_i915_private *dev_priv = engine->i915;
560 struct drm_i915_private *dev_priv = dev->dev_private;
561 struct intel_ringbuffer *ringbuf = engine->buffer; 563 struct intel_ringbuffer *ringbuf = engine->buffer;
562 struct drm_i915_gem_object *obj = ringbuf->obj; 564 struct drm_i915_gem_object *obj = ringbuf->obj;
563 int ret = 0; 565 int ret = 0;
@@ -587,7 +589,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
587 } 589 }
588 } 590 }
589 591
590 if (I915_NEED_GFX_HWS(dev)) 592 if (I915_NEED_GFX_HWS(dev_priv))
591 intel_ring_setup_status_page(engine); 593 intel_ring_setup_status_page(engine);
592 else 594 else
593 ring_setup_phys_status_page(engine); 595 ring_setup_phys_status_page(engine);
@@ -644,12 +646,10 @@ out:
644void 646void
645intel_fini_pipe_control(struct intel_engine_cs *engine) 647intel_fini_pipe_control(struct intel_engine_cs *engine)
646{ 648{
647 struct drm_device *dev = engine->dev;
648
649 if (engine->scratch.obj == NULL) 649 if (engine->scratch.obj == NULL)
650 return; 650 return;
651 651
652 if (INTEL_INFO(dev)->gen >= 5) { 652 if (INTEL_GEN(engine->i915) >= 5) {
653 kunmap(sg_page(engine->scratch.obj->pages->sgl)); 653 kunmap(sg_page(engine->scratch.obj->pages->sgl));
654 i915_gem_object_ggtt_unpin(engine->scratch.obj); 654 i915_gem_object_ggtt_unpin(engine->scratch.obj);
655 } 655 }
@@ -665,10 +665,11 @@ intel_init_pipe_control(struct intel_engine_cs *engine)
665 665
666 WARN_ON(engine->scratch.obj); 666 WARN_ON(engine->scratch.obj);
667 667
668 engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096); 668 engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096);
669 if (engine->scratch.obj == NULL) { 669 if (IS_ERR(engine->scratch.obj)) {
670 DRM_ERROR("Failed to allocate seqno page\n"); 670 DRM_ERROR("Failed to allocate seqno page\n");
671 ret = -ENOMEM; 671 ret = PTR_ERR(engine->scratch.obj);
672 engine->scratch.obj = NULL;
672 goto err; 673 goto err;
673 } 674 }
674 675
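The error-path change above follows i915_gem_object_create() switching from returning NULL to returning an ERR_PTR-encoded errno: callers now test IS_ERR(), extract the code with PTR_ERR(), and must NULL the stored pointer before taking the error path. A userspace re-creation of the <linux/err.h> convention:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(intptr_t err) { return (void *)err; }
static intptr_t PTR_ERR(const void *p) { return (intptr_t)p; }
static int IS_ERR(const void *p)
{
	/* Errors live in the top MAX_ERRNO addresses. */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *object_create(int fail)
{
	static int obj;
	return fail ? ERR_PTR(-ENOMEM) : &obj;
}

int main(void)
{
	void *scratch = object_create(1);

	if (IS_ERR(scratch)) {
		long ret = (long)PTR_ERR(scratch);
		scratch = NULL;	/* as the hunk does for engine->scratch.obj */
		printf("create failed: %ld\n", ret);
	}
	return 0;
}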
@@ -702,11 +703,9 @@ err:
702 703
703static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) 704static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
704{ 705{
705 int ret, i;
706 struct intel_engine_cs *engine = req->engine; 706 struct intel_engine_cs *engine = req->engine;
707 struct drm_device *dev = engine->dev; 707 struct i915_workarounds *w = &req->i915->workarounds;
708 struct drm_i915_private *dev_priv = dev->dev_private; 708 int ret, i;
709 struct i915_workarounds *w = &dev_priv->workarounds;
710 709
711 if (w->count == 0) 710 if (w->count == 0)
712 return 0; 711 return 0;
@@ -795,7 +794,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
795static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, 794static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
796 i915_reg_t reg) 795 i915_reg_t reg)
797{ 796{
798 struct drm_i915_private *dev_priv = engine->dev->dev_private; 797 struct drm_i915_private *dev_priv = engine->i915;
799 struct i915_workarounds *wa = &dev_priv->workarounds; 798 struct i915_workarounds *wa = &dev_priv->workarounds;
800 const uint32_t index = wa->hw_whitelist_count[engine->id]; 799 const uint32_t index = wa->hw_whitelist_count[engine->id];
801 800
@@ -811,8 +810,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
811 810
812static int gen8_init_workarounds(struct intel_engine_cs *engine) 811static int gen8_init_workarounds(struct intel_engine_cs *engine)
813{ 812{
814 struct drm_device *dev = engine->dev; 813 struct drm_i915_private *dev_priv = engine->i915;
815 struct drm_i915_private *dev_priv = dev->dev_private;
816 814
817 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); 815 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
818 816
@@ -863,9 +861,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
863 861
864static int bdw_init_workarounds(struct intel_engine_cs *engine) 862static int bdw_init_workarounds(struct intel_engine_cs *engine)
865{ 863{
864 struct drm_i915_private *dev_priv = engine->i915;
866 int ret; 865 int ret;
867 struct drm_device *dev = engine->dev;
868 struct drm_i915_private *dev_priv = dev->dev_private;
869 866
870 ret = gen8_init_workarounds(engine); 867 ret = gen8_init_workarounds(engine);
871 if (ret) 868 if (ret)
@@ -885,16 +882,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
885 /* WaForceContextSaveRestoreNonCoherent:bdw */ 882 /* WaForceContextSaveRestoreNonCoherent:bdw */
886 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 883 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
887 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ 884 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
888 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); 885 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
889 886
890 return 0; 887 return 0;
891} 888}
892 889
893static int chv_init_workarounds(struct intel_engine_cs *engine) 890static int chv_init_workarounds(struct intel_engine_cs *engine)
894{ 891{
892 struct drm_i915_private *dev_priv = engine->i915;
895 int ret; 893 int ret;
896 struct drm_device *dev = engine->dev;
897 struct drm_i915_private *dev_priv = dev->dev_private;
898 894
899 ret = gen8_init_workarounds(engine); 895 ret = gen8_init_workarounds(engine);
900 if (ret) 896 if (ret)
@@ -911,8 +907,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
911 907
912static int gen9_init_workarounds(struct intel_engine_cs *engine) 908static int gen9_init_workarounds(struct intel_engine_cs *engine)
913{ 909{
914 struct drm_device *dev = engine->dev; 910 struct drm_i915_private *dev_priv = engine->i915;
915 struct drm_i915_private *dev_priv = dev->dev_private;
916 uint32_t tmp; 911 uint32_t tmp;
917 int ret; 912 int ret;
918 913
@@ -935,14 +930,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
935 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); 930 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
936 931
937 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ 932 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
938 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 933 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
939 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 934 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
940 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 935 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
941 GEN9_DG_MIRROR_FIX_ENABLE); 936 GEN9_DG_MIRROR_FIX_ENABLE);
942 937
943 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 938 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
944 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 939 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
945 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 940 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
946 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, 941 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
947 GEN9_RHWO_OPTIMIZATION_DISABLE); 942 GEN9_RHWO_OPTIMIZATION_DISABLE);
948 /* 943 /*
@@ -968,20 +963,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
968 GEN9_CCS_TLB_PREFETCH_ENABLE); 963 GEN9_CCS_TLB_PREFETCH_ENABLE);
969 964
970 /* WaDisableMaskBasedCammingInRCC:skl,bxt */ 965 /* WaDisableMaskBasedCammingInRCC:skl,bxt */
971 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) || 966 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
972 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 967 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
973 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, 968 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
974 PIXEL_MASK_CAMMING_DISABLE); 969 PIXEL_MASK_CAMMING_DISABLE);
975 970
976 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ 971 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
977 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; 972 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
978 if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || 973 if (IS_SKL_REVID(dev_priv, SKL_REVID_F0, REVID_FOREVER) ||
979 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) 974 IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
980 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 975 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
981 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); 976 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
982 977
983 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ 978 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
984 if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0)) 979 if (IS_SKYLAKE(dev_priv) || IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
985 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 980 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
986 GEN8_SAMPLER_POWER_BYPASS_DIS); 981 GEN8_SAMPLER_POWER_BYPASS_DIS);
987 982
@@ -1007,8 +1002,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
1007 1002
1008static int skl_tune_iz_hashing(struct intel_engine_cs *engine) 1003static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
1009{ 1004{
1010 struct drm_device *dev = engine->dev; 1005 struct drm_i915_private *dev_priv = engine->i915;
1011 struct drm_i915_private *dev_priv = dev->dev_private;
1012 u8 vals[3] = { 0, 0, 0 }; 1006 u8 vals[3] = { 0, 0, 0 };
1013 unsigned int i; 1007 unsigned int i;
1014 1008
@@ -1049,9 +1043,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
1049 1043
1050static int skl_init_workarounds(struct intel_engine_cs *engine) 1044static int skl_init_workarounds(struct intel_engine_cs *engine)
1051{ 1045{
1046 struct drm_i915_private *dev_priv = engine->i915;
1052 int ret; 1047 int ret;
1053 struct drm_device *dev = engine->dev;
1054 struct drm_i915_private *dev_priv = dev->dev_private;
1055 1048
1056 ret = gen9_init_workarounds(engine); 1049 ret = gen9_init_workarounds(engine);
1057 if (ret) 1050 if (ret)
@@ -1062,12 +1055,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1062 * until D0 which is the default case so this is equivalent to 1055 * until D0 which is the default case so this is equivalent to
1063 * !WaDisablePerCtxtPreemptionGranularityControl:skl 1056 * !WaDisablePerCtxtPreemptionGranularityControl:skl
1064 */ 1057 */
1065 if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { 1058 if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
1066 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, 1059 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
1067 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); 1060 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
1068 } 1061 }
1069 1062
1070 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { 1063 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0)) {
1071 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ 1064 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
1072 I915_WRITE(FF_SLICE_CS_CHICKEN2, 1065 I915_WRITE(FF_SLICE_CS_CHICKEN2,
1073 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); 1066 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1076,24 +1069,24 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1076 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1069 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1077 * involving this register should also be added to WA batch as required. 1070 * involving this register should also be added to WA batch as required.
1078 */ 1071 */
1079 if (IS_SKL_REVID(dev, 0, SKL_REVID_E0)) 1072 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
1080 /* WaDisableLSQCROPERFforOCL:skl */ 1073 /* WaDisableLSQCROPERFforOCL:skl */
1081 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1074 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1082 GEN8_LQSC_RO_PERF_DIS); 1075 GEN8_LQSC_RO_PERF_DIS);
1083 1076
1084 /* WaEnableGapsTsvCreditFix:skl */ 1077 /* WaEnableGapsTsvCreditFix:skl */
1085 if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) { 1078 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
1086 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 1079 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1087 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1080 GEN9_GAPS_TSV_CREDIT_DISABLE));
1088 } 1081 }
1089 1082
1090 /* WaDisablePowerCompilerClockGating:skl */ 1083 /* WaDisablePowerCompilerClockGating:skl */
1091 if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0)) 1084 if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
1092 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1085 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1093 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1086 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1094 1087
1095 /* This is tied to WaForceContextSaveRestoreNonCoherent */ 1088 /* This is tied to WaForceContextSaveRestoreNonCoherent */
1096 if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { 1089 if (IS_SKL_REVID(dev_priv, 0, REVID_FOREVER)) {
1097 /* 1090 /*
1098 *Use Force Non-Coherent whenever executing a 3D context. This 1091 *Use Force Non-Coherent whenever executing a 3D context. This
1099 * is a workaround for a possible hang in the unlikely event 1092 * is a workaround for a possible hang in the unlikely event
@@ -1109,13 +1102,13 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1109 } 1102 }
1110 1103
1111 /* WaBarrierPerformanceFixDisable:skl */ 1104 /* WaBarrierPerformanceFixDisable:skl */
1112 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) 1105 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
1113 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1106 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1114 HDC_FENCE_DEST_SLM_DISABLE | 1107 HDC_FENCE_DEST_SLM_DISABLE |
1115 HDC_BARRIER_PERFORMANCE_DISABLE); 1108 HDC_BARRIER_PERFORMANCE_DISABLE);
1116 1109
1117 /* WaDisableSbeCacheDispatchPortSharing:skl */ 1110 /* WaDisableSbeCacheDispatchPortSharing:skl */
1118 if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) 1111 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
1119 WA_SET_BIT_MASKED( 1112 WA_SET_BIT_MASKED(
1120 GEN7_HALF_SLICE_CHICKEN1, 1113 GEN7_HALF_SLICE_CHICKEN1,
1121 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1114 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1130,9 +1123,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1130 1123
1131static int bxt_init_workarounds(struct intel_engine_cs *engine) 1124static int bxt_init_workarounds(struct intel_engine_cs *engine)
1132{ 1125{
1126 struct drm_i915_private *dev_priv = engine->i915;
1133 int ret; 1127 int ret;
1134 struct drm_device *dev = engine->dev;
1135 struct drm_i915_private *dev_priv = dev->dev_private;
1136 1128
1137 ret = gen9_init_workarounds(engine); 1129 ret = gen9_init_workarounds(engine);
1138 if (ret) 1130 if (ret)
@@ -1140,11 +1132,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1140 1132
1141 /* WaStoreMultiplePTEenable:bxt */ 1133 /* WaStoreMultiplePTEenable:bxt */
1142 /* This is a requirement according to the hardware specification */ 1134 /* This is a requirement according to the hardware specification */
1143 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1135 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1144 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); 1136 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1145 1137
1146 /* WaSetClckGatingDisableMedia:bxt */ 1138 /* WaSetClckGatingDisableMedia:bxt */
1147 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1139 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1148 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1140 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1149 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); 1141 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1150 } 1142 }
@@ -1154,7 +1146,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1154 STALL_DOP_GATING_DISABLE); 1146 STALL_DOP_GATING_DISABLE);
1155 1147
1156 /* WaDisableSbeCacheDispatchPortSharing:bxt */ 1148 /* WaDisableSbeCacheDispatchPortSharing:bxt */
1157 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { 1149 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
1158 WA_SET_BIT_MASKED( 1150 WA_SET_BIT_MASKED(
1159 GEN7_HALF_SLICE_CHICKEN1, 1151 GEN7_HALF_SLICE_CHICKEN1,
1160 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1152 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1164,7 +1156,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1164 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ 1156 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1165 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ 1157 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
1166 /* WaDisableLSQCROPERFforOCL:bxt */ 1158 /* WaDisableLSQCROPERFforOCL:bxt */
1167 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1159 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1168 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); 1160 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1169 if (ret) 1161 if (ret)
1170 return ret; 1162 return ret;
@@ -1174,29 +1166,33 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1174 return ret; 1166 return ret;
1175 } 1167 }
1176 1168
1169 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1170 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
1171 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1172 L3_HIGH_PRIO_CREDITS(2));
1173
1177 return 0; 1174 return 0;
1178} 1175}
1179 1176
1180int init_workarounds_ring(struct intel_engine_cs *engine) 1177int init_workarounds_ring(struct intel_engine_cs *engine)
1181{ 1178{
1182 struct drm_device *dev = engine->dev; 1179 struct drm_i915_private *dev_priv = engine->i915;
1183 struct drm_i915_private *dev_priv = dev->dev_private;
1184 1180
1185 WARN_ON(engine->id != RCS); 1181 WARN_ON(engine->id != RCS);
1186 1182
1187 dev_priv->workarounds.count = 0; 1183 dev_priv->workarounds.count = 0;
1188 dev_priv->workarounds.hw_whitelist_count[RCS] = 0; 1184 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
1189 1185
1190 if (IS_BROADWELL(dev)) 1186 if (IS_BROADWELL(dev_priv))
1191 return bdw_init_workarounds(engine); 1187 return bdw_init_workarounds(engine);
1192 1188
1193 if (IS_CHERRYVIEW(dev)) 1189 if (IS_CHERRYVIEW(dev_priv))
1194 return chv_init_workarounds(engine); 1190 return chv_init_workarounds(engine);
1195 1191
1196 if (IS_SKYLAKE(dev)) 1192 if (IS_SKYLAKE(dev_priv))
1197 return skl_init_workarounds(engine); 1193 return skl_init_workarounds(engine);
1198 1194
1199 if (IS_BROXTON(dev)) 1195 if (IS_BROXTON(dev_priv))
1200 return bxt_init_workarounds(engine); 1196 return bxt_init_workarounds(engine);
1201 1197
1202 return 0; 1198 return 0;
@@ -1204,14 +1200,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
1204 1200
1205static int init_render_ring(struct intel_engine_cs *engine) 1201static int init_render_ring(struct intel_engine_cs *engine)
1206{ 1202{
1207 struct drm_device *dev = engine->dev; 1203 struct drm_i915_private *dev_priv = engine->i915;
1208 struct drm_i915_private *dev_priv = dev->dev_private;
1209 int ret = init_ring_common(engine); 1204 int ret = init_ring_common(engine);
1210 if (ret) 1205 if (ret)
1211 return ret; 1206 return ret;
1212 1207
1213 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 1208 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
1214 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) 1209 if (IS_GEN(dev_priv, 4, 6))
1215 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 1210 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1216 1211
1217 /* We need to disable the AsyncFlip performance optimisations in order 1212 /* We need to disable the AsyncFlip performance optimisations in order
@@ -1220,22 +1215,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
1220 * 1215 *
1221 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv 1216 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1222 */ 1217 */
1223 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) 1218 if (IS_GEN(dev_priv, 6, 7))
1224 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 1219 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1225 1220
1226 /* Required for the hardware to program scanline values for waiting */ 1221 /* Required for the hardware to program scanline values for waiting */
1227 /* WaEnableFlushTlbInvalidationMode:snb */ 1222 /* WaEnableFlushTlbInvalidationMode:snb */
1228 if (INTEL_INFO(dev)->gen == 6) 1223 if (IS_GEN6(dev_priv))
1229 I915_WRITE(GFX_MODE, 1224 I915_WRITE(GFX_MODE,
1230 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); 1225 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
1231 1226
1232 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 1227 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
1233 if (IS_GEN7(dev)) 1228 if (IS_GEN7(dev_priv))
1234 I915_WRITE(GFX_MODE_GEN7, 1229 I915_WRITE(GFX_MODE_GEN7,
1235 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | 1230 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1236 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 1231 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
1237 1232
1238 if (IS_GEN6(dev)) { 1233 if (IS_GEN6(dev_priv)) {
1239 /* From the Sandybridge PRM, volume 1 part 3, page 24: 1234 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1240 * "If this bit is set, STCunit will have LRA as replacement 1235 * "If this bit is set, STCunit will have LRA as replacement
1241 * policy. [...] This bit must be reset. LRA replacement 1236 * policy. [...] This bit must be reset. LRA replacement
@@ -1245,19 +1240,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
1245 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 1240 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
1246 } 1241 }
1247 1242
1248 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) 1243 if (IS_GEN(dev_priv, 6, 7))
1249 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1244 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1250 1245
1251 if (HAS_L3_DPF(dev)) 1246 if (HAS_L3_DPF(dev_priv))
1252 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); 1247 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
1253 1248
1254 return init_workarounds_ring(engine); 1249 return init_workarounds_ring(engine);
1255} 1250}
1256 1251
1257static void render_ring_cleanup(struct intel_engine_cs *engine) 1252static void render_ring_cleanup(struct intel_engine_cs *engine)
1258{ 1253{
1259 struct drm_device *dev = engine->dev; 1254 struct drm_i915_private *dev_priv = engine->i915;
1260 struct drm_i915_private *dev_priv = dev->dev_private;
1261 1255
1262 if (dev_priv->semaphore_obj) { 1256 if (dev_priv->semaphore_obj) {
1263 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); 1257 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1273,13 +1267,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
         struct intel_engine_cs *signaller = signaller_req->engine;
-        struct drm_device *dev = signaller->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = signaller_req->i915;
         struct intel_engine_cs *waiter;
         enum intel_engine_id id;
         int ret, num_rings;
 
-        num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+        num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1297,7 +1290,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                            PIPE_CONTROL_QW_WRITE |
-                                           PIPE_CONTROL_FLUSH_ENABLE);
+                                           PIPE_CONTROL_CS_STALL);
                 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
                 intel_ring_emit(signaller, seqno);
@@ -1315,13 +1308,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
         struct intel_engine_cs *signaller = signaller_req->engine;
-        struct drm_device *dev = signaller->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = signaller_req->i915;
         struct intel_engine_cs *waiter;
         enum intel_engine_id id;
         int ret, num_rings;
 
-        num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+        num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1354,14 +1346,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                        unsigned int num_dwords)
 {
         struct intel_engine_cs *signaller = signaller_req->engine;
-        struct drm_device *dev = signaller->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = signaller_req->i915;
         struct intel_engine_cs *useless;
         enum intel_engine_id id;
         int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-        num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+        num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
         num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1420,10 +1411,38 @@ gen6_add_request(struct drm_i915_gem_request *req)
         return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static int
+gen8_render_add_request(struct drm_i915_gem_request *req)
+{
+        struct intel_engine_cs *engine = req->engine;
+        int ret;
+
+        if (engine->semaphore.signal)
+                ret = engine->semaphore.signal(req, 8);
+        else
+                ret = intel_ring_begin(req, 8);
+        if (ret)
+                return ret;
+
+        intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+        intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+                                 PIPE_CONTROL_CS_STALL |
+                                 PIPE_CONTROL_QW_WRITE));
+        intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
+        intel_ring_emit(engine, 0);
+        intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+        /* We're thrashing one dword of HWS. */
+        intel_ring_emit(engine, 0);
+        intel_ring_emit(engine, MI_USER_INTERRUPT);
+        intel_ring_emit(engine, MI_NOOP);
+        __intel_ring_advance(engine);
+
+        return 0;
+}
+
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
                                               u32 seqno)
 {
-        struct drm_i915_private *dev_priv = dev->dev_private;
         return dev_priv->last_seqno < seqno;
 }
 
@@ -1441,7 +1460,8 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
                u32 seqno)
 {
         struct intel_engine_cs *waiter = waiter_req->engine;
-        struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+        struct drm_i915_private *dev_priv = waiter_req->i915;
+        struct i915_hw_ppgtt *ppgtt;
         int ret;
 
         ret = intel_ring_begin(waiter_req, 4);
@@ -1450,7 +1470,6 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 
         intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
                                 MI_SEMAPHORE_GLOBAL_GTT |
-                                MI_SEMAPHORE_POLL |
                                 MI_SEMAPHORE_SAD_GTE_SDD);
         intel_ring_emit(waiter, seqno);
         intel_ring_emit(waiter,
@@ -1458,6 +1477,15 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
         intel_ring_emit(waiter,
                         upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
         intel_ring_advance(waiter);
+
+        /* When the !RCS engines idle waiting upon a semaphore, they lose their
+         * pagetables and we must reload them before executing the batch.
+         * We do this on the i915_switch_context() following the wait and
+         * before the dispatch.
+         */
+        ppgtt = waiter_req->ctx->ppgtt;
+        if (ppgtt && waiter_req->engine->id != RCS)
+                ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
         return 0;
 }
 
@@ -1486,7 +1514,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
                 return ret;
 
         /* If seqno wrap happened, omit the wait with no-ops */
-        if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+        if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
                 intel_ring_emit(waiter, dw1 | wait_mbox);
                 intel_ring_emit(waiter, seqno);
                 intel_ring_emit(waiter, 0);
@@ -1567,7 +1595,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
 static void
 gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
-        struct drm_i915_private *dev_priv = engine->dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
 
         /* Workaround to force correct ordering between irq and seqno writes on
          * ivb (and maybe also on snb) by reading from a CS register (like
@@ -1616,8 +1644,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 static bool
 gen5_ring_get_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1634,8 +1661,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen5_ring_put_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1647,8 +1673,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i9xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         if (!intel_irqs_enabled(dev_priv))
@@ -1668,8 +1693,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i9xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1684,8 +1708,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i8xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         if (!intel_irqs_enabled(dev_priv))
@@ -1705,8 +1728,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i8xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1759,8 +1781,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 static bool
 gen6_ring_get_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1768,10 +1789,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (engine->irq_refcount++ == 0) {
-                if (HAS_L3_DPF(dev) && engine->id == RCS)
+                if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
                         I915_WRITE_IMR(engine,
                                        ~(engine->irq_enable_mask |
-                                         GT_PARITY_ERROR(dev)));
+                                         GT_PARITY_ERROR(dev_priv)));
                 else
                         I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
                 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1784,14 +1805,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen6_ring_put_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (--engine->irq_refcount == 0) {
-                if (HAS_L3_DPF(dev) && engine->id == RCS)
-                        I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+                if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+                        I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
                 else
                         I915_WRITE_IMR(engine, ~0);
                 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1802,8 +1822,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 hsw_vebox_get_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1822,8 +1841,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine)
 static void
 hsw_vebox_put_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1837,8 +1855,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine)
 static bool
 gen8_ring_get_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1846,7 +1863,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (engine->irq_refcount++ == 0) {
-                if (HAS_L3_DPF(dev) && engine->id == RCS) {
+                if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
                         I915_WRITE_IMR(engine,
                                        ~(engine->irq_enable_mask |
                                          GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
@@ -1863,13 +1880,12 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen8_ring_put_irq(struct intel_engine_cs *engine)
 {
-        struct drm_device *dev = engine->dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (--engine->irq_refcount == 0) {
-                if (HAS_L3_DPF(dev) && engine->id == RCS) {
+                if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
                         I915_WRITE_IMR(engine,
                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
                 } else {
@@ -1991,12 +2007,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-        struct drm_i915_private *dev_priv = to_i915(engine->dev);
+        struct drm_i915_private *dev_priv = engine->i915;
 
         if (!dev_priv->status_page_dmah)
                 return;
 
-        drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+        drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
         engine->status_page.page_addr = NULL;
 }
 
@@ -2022,10 +2038,10 @@ static int init_status_page(struct intel_engine_cs *engine)
         unsigned flags;
         int ret;
 
-        obj = i915_gem_alloc_object(engine->dev, 4096);
-        if (obj == NULL) {
+        obj = i915_gem_object_create(engine->i915->dev, 4096);
+        if (IS_ERR(obj)) {
                 DRM_ERROR("Failed to allocate status page\n");
-                return -ENOMEM;
+                return PTR_ERR(obj);
         }
 
         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2033,7 +2049,7 @@ static int init_status_page(struct intel_engine_cs *engine)
                 goto err_unref;
 
         flags = 0;
-        if (!HAS_LLC(engine->dev))
+        if (!HAS_LLC(engine->i915))
                 /* On g33, we cannot place HWS above 256MiB, so
                  * restrict its pinning to the low mappable arena.
                  * Though this restriction is not documented for
@@ -2067,11 +2083,11 @@ err_unref:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-        struct drm_i915_private *dev_priv = engine->dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
 
         if (!dev_priv->status_page_dmah) {
                 dev_priv->status_page_dmah =
-                        drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
+                        drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
                 if (!dev_priv->status_page_dmah)
                         return -ENOMEM;
         }
@@ -2084,20 +2100,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
+        GEM_BUG_ON(ringbuf->vma == NULL);
+        GEM_BUG_ON(ringbuf->virtual_start == NULL);
+
         if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
                 i915_gem_object_unpin_map(ringbuf->obj);
         else
-                iounmap(ringbuf->virtual_start);
+                i915_vma_unpin_iomap(ringbuf->vma);
         ringbuf->virtual_start = NULL;
-        ringbuf->vma = NULL;
+
         i915_gem_object_ggtt_unpin(ringbuf->obj);
+        ringbuf->vma = NULL;
 }
 
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
                                      struct intel_ringbuffer *ringbuf)
 {
-        struct drm_i915_private *dev_priv = to_i915(dev);
-        struct i915_ggtt *ggtt = &dev_priv->ggtt;
         struct drm_i915_gem_object *obj = ringbuf->obj;
         /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
         unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2131,10 +2149,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                 /* Access through the GTT requires the device to be awake. */
                 assert_rpm_wakelock_held(dev_priv);
 
-                addr = ioremap_wc(ggtt->mappable_base +
-                                  i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-                if (addr == NULL) {
-                        ret = -ENOMEM;
+                addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
+                if (IS_ERR(addr)) {
+                        ret = PTR_ERR(addr);
                         goto err_unpin;
                 }
         }
@@ -2163,9 +2180,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
         if (!HAS_LLC(dev))
                 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
         if (obj == NULL)
-                obj = i915_gem_alloc_object(dev, ringbuf->size);
-        if (obj == NULL)
-                return -ENOMEM;
+                obj = i915_gem_object_create(dev, ringbuf->size);
+        if (IS_ERR(obj))
+                return PTR_ERR(obj);
 
         /* mark ring buffers as read-only from GPU side by default */
         obj->gt_ro = 1;
@@ -2197,13 +2214,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
          * of the buffer.
          */
         ring->effective_size = size;
-        if (IS_I830(engine->dev) || IS_845G(engine->dev))
+        if (IS_I830(engine->i915) || IS_845G(engine->i915))
                 ring->effective_size -= 2 * CACHELINE_BYTES;
 
         ring->last_retired_head = -1;
         intel_ring_update_space(ring);
 
-        ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+        ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
         if (ret) {
                 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
                                  engine->name, ret);
@@ -2226,12 +2243,13 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
 static int intel_init_ring_buffer(struct drm_device *dev,
                                   struct intel_engine_cs *engine)
 {
+        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_ringbuffer *ringbuf;
         int ret;
 
         WARN_ON(engine->buffer);
 
-        engine->dev = dev;
+        engine->i915 = dev_priv;
         INIT_LIST_HEAD(&engine->active_list);
         INIT_LIST_HEAD(&engine->request_list);
         INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2249,7 +2267,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
         }
         engine->buffer = ringbuf;
 
-        if (I915_NEED_GFX_HWS(dev)) {
+        if (I915_NEED_GFX_HWS(dev_priv)) {
                 ret = init_status_page(engine);
                 if (ret)
                         goto error;
@@ -2260,7 +2278,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                 goto error;
         }
 
-        ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+        ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
         if (ret) {
                 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
                           engine->name, ret);
@@ -2286,11 +2304,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
         if (!intel_engine_initialized(engine))
                 return;
 
-        dev_priv = to_i915(engine->dev);
+        dev_priv = engine->i915;
 
         if (engine->buffer) {
                 intel_stop_engine(engine);
-                WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+                WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
                 intel_unpin_ringbuffer_obj(engine->buffer);
                 intel_ringbuffer_free(engine->buffer);
@@ -2300,7 +2318,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
         if (engine->cleanup)
                 engine->cleanup(engine);
 
-        if (I915_NEED_GFX_HWS(engine->dev)) {
+        if (I915_NEED_GFX_HWS(dev_priv)) {
                 cleanup_status_page(engine);
         } else {
                 WARN_ON(engine->id != RCS);
@@ -2309,7 +2327,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 
         i915_cmd_parser_fini_ring(engine);
         i915_gem_batch_pool_fini(&engine->batch_pool);
-        engine->dev = NULL;
+        engine->i915 = NULL;
 }
 
 int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2332,46 +2350,22 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-        request->ringbuf = request->engine->buffer;
-        return 0;
-}
+        int ret;
 
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-        /*
-         * The first call merely notes the reserve request and is common for
-         * all back ends. The subsequent localised _begin() call actually
-         * ensures that the reservation is available. Without the begin, if
-         * the request creator immediately submitted the request without
-         * adding any commands to it then there might not actually be
-         * sufficient room for the submission commands.
+        /* Flush enough space to reduce the likelihood of waiting after
+         * we start building the request - in which case we will just
+         * have to repeat work.
          */
-        intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+        request->reserved_space += LEGACY_REQUEST_SIZE;
 
-        return intel_ring_begin(request, 0);
-}
-
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
-        GEM_BUG_ON(ringbuf->reserved_size);
-        ringbuf->reserved_size = size;
-}
-
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
-{
-        GEM_BUG_ON(!ringbuf->reserved_size);
-        ringbuf->reserved_size = 0;
-}
+        request->ringbuf = request->engine->buffer;
 
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
-        GEM_BUG_ON(!ringbuf->reserved_size);
-        ringbuf->reserved_size = 0;
-}
+        ret = intel_ring_begin(request, 0);
+        if (ret)
+                return ret;
 
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
-        GEM_BUG_ON(ringbuf->reserved_size);
+        request->reserved_space -= LEGACY_REQUEST_SIZE;
+        return 0;
 }
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
@@ -2393,7 +2387,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
          *
          * See also i915_gem_request_alloc() and i915_add_request().
          */
-        GEM_BUG_ON(!ringbuf->reserved_size);
+        GEM_BUG_ON(!req->reserved_space);
 
         list_for_each_entry(target, &engine->request_list, list) {
                 unsigned space;
@@ -2428,7 +2422,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
         int total_bytes, wait_bytes;
         bool need_wrap = false;
 
-        total_bytes = bytes + ringbuf->reserved_size;
+        total_bytes = bytes + req->reserved_space;
 
         if (unlikely(bytes > remain_usable)) {
                 /*
@@ -2444,7 +2438,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
                  * and only need to effectively wait for the reserved
                  * size space from the start of ringbuffer.
                  */
-                wait_bytes = remain_actual + ringbuf->reserved_size;
+                wait_bytes = remain_actual + req->reserved_space;
         } else {
                 /* No wrapping required, just waiting. */
                 wait_bytes = total_bytes;
@@ -2501,7 +2495,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-        struct drm_i915_private *dev_priv = to_i915(engine->dev);
+        struct drm_i915_private *dev_priv = engine->i915;
 
         /* Our semaphore implementation is strictly monotonic (i.e. we proceed
          * so long as the semaphore value in the register/page is greater
@@ -2511,7 +2505,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
          * the semaphore value, then when the seqno moves backwards all
          * future waits will complete instantly (causing rendering corruption).
          */
-        if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
+        if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
                 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
                 if (HAS_VEBOX(dev_priv))
@@ -2537,7 +2531,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
                                      u32 value)
 {
-        struct drm_i915_private *dev_priv = engine->dev->dev_private;
+        struct drm_i915_private *dev_priv = engine->i915;
 
         /* Every tail move must follow the sequence below */
 
@@ -2579,7 +2573,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                 return ret;
 
         cmd = MI_FLUSH_DW;
-        if (INTEL_INFO(engine->dev)->gen >= 8)
+        if (INTEL_GEN(req->i915) >= 8)
                 cmd += 1;
 
         /* We always require a command barrier so that subsequent
@@ -2601,7 +2595,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
         intel_ring_emit(engine, cmd);
         intel_ring_emit(engine,
                         I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-        if (INTEL_INFO(engine->dev)->gen >= 8) {
+        if (INTEL_GEN(req->i915) >= 8) {
                 intel_ring_emit(engine, 0); /* upper addr */
                 intel_ring_emit(engine, 0); /* value */
         } else {
@@ -2692,7 +2686,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
                            u32 invalidate, u32 flush)
 {
         struct intel_engine_cs *engine = req->engine;
-        struct drm_device *dev = engine->dev;
         uint32_t cmd;
         int ret;
 
@@ -2701,7 +2694,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
                 return ret;
 
         cmd = MI_FLUSH_DW;
-        if (INTEL_INFO(dev)->gen >= 8)
+        if (INTEL_GEN(req->i915) >= 8)
                 cmd += 1;
 
         /* We always require a command barrier so that subsequent
@@ -2722,7 +2715,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
         intel_ring_emit(engine, cmd);
         intel_ring_emit(engine,
                         I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-        if (INTEL_INFO(dev)->gen >= 8) {
+        if (INTEL_GEN(req->i915) >= 8) {
                 intel_ring_emit(engine, 0); /* upper addr */
                 intel_ring_emit(engine, 0); /* value */
         } else {
@@ -2747,10 +2740,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
         engine->hw_id = 0;
         engine->mmio_base = RENDER_RING_BASE;
 
-        if (INTEL_INFO(dev)->gen >= 8) {
-                if (i915_semaphore_is_enabled(dev)) {
-                        obj = i915_gem_alloc_object(dev, 4096);
-                        if (obj == NULL) {
+        if (INTEL_GEN(dev_priv) >= 8) {
+                if (i915_semaphore_is_enabled(dev_priv)) {
+                        obj = i915_gem_object_create(dev, 4096);
+                        if (IS_ERR(obj)) {
                                 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
                                 i915.semaphores = 0;
                         } else {
@@ -2766,25 +2759,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                 }
 
                 engine->init_context = intel_rcs_ctx_init;
-                engine->add_request = gen6_add_request;
+                engine->add_request = gen8_render_add_request;
                 engine->flush = gen8_render_ring_flush;
                 engine->irq_get = gen8_ring_get_irq;
                 engine->irq_put = gen8_ring_put_irq;
                 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-                engine->irq_seqno_barrier = gen6_seqno_barrier;
                 engine->get_seqno = ring_get_seqno;
                 engine->set_seqno = ring_set_seqno;
-                if (i915_semaphore_is_enabled(dev)) {
+                if (i915_semaphore_is_enabled(dev_priv)) {
                         WARN_ON(!dev_priv->semaphore_obj);
                         engine->semaphore.sync_to = gen8_ring_sync;
                         engine->semaphore.signal = gen8_rcs_signal;
                         GEN8_RING_SEMAPHORE_INIT(engine);
                 }
-        } else if (INTEL_INFO(dev)->gen >= 6) {
+        } else if (INTEL_GEN(dev_priv) >= 6) {
                 engine->init_context = intel_rcs_ctx_init;
                 engine->add_request = gen6_add_request;
                 engine->flush = gen7_render_ring_flush;
-                if (INTEL_INFO(dev)->gen == 6)
+                if (IS_GEN6(dev_priv))
                         engine->flush = gen6_render_ring_flush;
                 engine->irq_get = gen6_ring_get_irq;
                 engine->irq_put = gen6_ring_put_irq;
@@ -2792,7 +2784,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                 engine->irq_seqno_barrier = gen6_seqno_barrier;
                 engine->get_seqno = ring_get_seqno;
                 engine->set_seqno = ring_set_seqno;
-                if (i915_semaphore_is_enabled(dev)) {
+                if (i915_semaphore_is_enabled(dev_priv)) {
                         engine->semaphore.sync_to = gen6_ring_sync;
                         engine->semaphore.signal = gen6_signal;
                         /*
@@ -2813,7 +2805,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                         engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
                         engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
                 }
-        } else if (IS_GEN5(dev)) {
+        } else if (IS_GEN5(dev_priv)) {
                 engine->add_request = pc_render_add_request;
                 engine->flush = gen4_render_ring_flush;
                 engine->get_seqno = pc_render_get_seqno;
@@ -2824,13 +2816,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                                         GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
         } else {
                 engine->add_request = i9xx_add_request;
-                if (INTEL_INFO(dev)->gen < 4)
+                if (INTEL_GEN(dev_priv) < 4)
                         engine->flush = gen2_render_ring_flush;
                 else
                         engine->flush = gen4_render_ring_flush;
                 engine->get_seqno = ring_get_seqno;
                 engine->set_seqno = ring_set_seqno;
-                if (IS_GEN2(dev)) {
+                if (IS_GEN2(dev_priv)) {
                         engine->irq_get = i8xx_ring_get_irq;
                         engine->irq_put = i8xx_ring_put_irq;
                 } else {
@@ -2841,15 +2833,15 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
         }
         engine->write_tail = ring_write_tail;
 
-        if (IS_HASWELL(dev))
+        if (IS_HASWELL(dev_priv))
                 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
-        else if (IS_GEN8(dev))
+        else if (IS_GEN8(dev_priv))
                 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-        else if (INTEL_INFO(dev)->gen >= 6)
+        else if (INTEL_GEN(dev_priv) >= 6)
                 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-        else if (INTEL_INFO(dev)->gen >= 4)
+        else if (INTEL_GEN(dev_priv) >= 4)
                 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
-        else if (IS_I830(dev) || IS_845G(dev))
+        else if (IS_I830(dev_priv) || IS_845G(dev_priv))
                 engine->dispatch_execbuffer = i830_dispatch_execbuffer;
         else
                 engine->dispatch_execbuffer = i915_dispatch_execbuffer;
@@ -2857,11 +2849,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
         engine->cleanup = render_ring_cleanup;
 
         /* Workaround batchbuffer to combat CS tlb bug. */
-        if (HAS_BROKEN_CS_TLB(dev)) {
-                obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
-                if (obj == NULL) {
+        if (HAS_BROKEN_CS_TLB(dev_priv)) {
+                obj = i915_gem_object_create(dev, I830_WA_SIZE);
+                if (IS_ERR(obj)) {
                         DRM_ERROR("Failed to allocate batch bo\n");
-                        return -ENOMEM;
+                        return PTR_ERR(obj);
                 }
 
                 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
@@ -2879,7 +2871,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
         if (ret)
                 return ret;
 
-        if (INTEL_INFO(dev)->gen >= 5) {
+        if (INTEL_GEN(dev_priv) >= 5) {
                 ret = intel_init_pipe_control(engine);
                 if (ret)
                         return ret;
@@ -2899,24 +2891,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
         engine->hw_id = 1;
 
         engine->write_tail = ring_write_tail;
-        if (INTEL_INFO(dev)->gen >= 6) {
+        if (INTEL_GEN(dev_priv) >= 6) {
                 engine->mmio_base = GEN6_BSD_RING_BASE;
                 /* gen6 bsd needs a special wa for tail updates */
-                if (IS_GEN6(dev))
+                if (IS_GEN6(dev_priv))
                         engine->write_tail = gen6_bsd_ring_write_tail;
                 engine->flush = gen6_bsd_ring_flush;
                 engine->add_request = gen6_add_request;
                 engine->irq_seqno_barrier = gen6_seqno_barrier;
                 engine->get_seqno = ring_get_seqno;
                 engine->set_seqno = ring_set_seqno;
-                if (INTEL_INFO(dev)->gen >= 8) {
+                if (INTEL_GEN(dev_priv) >= 8) {
                         engine->irq_enable_mask =
                                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
                         engine->irq_get = gen8_ring_get_irq;
                         engine->irq_put = gen8_ring_put_irq;
                         engine->dispatch_execbuffer =
                                 gen8_ring_dispatch_execbuffer;
-                        if (i915_semaphore_is_enabled(dev)) {
+                        if (i915_semaphore_is_enabled(dev_priv)) {
                                 engine->semaphore.sync_to = gen8_ring_sync;
                                 engine->semaphore.signal = gen8_xcs_signal;
                                 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -2927,7 +2919,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
                         engine->irq_put = gen6_ring_put_irq;
                         engine->dispatch_execbuffer =
                                 gen6_ring_dispatch_execbuffer;
-                        if (i915_semaphore_is_enabled(dev)) {
+                        if (i915_semaphore_is_enabled(dev_priv)) {
                                 engine->semaphore.sync_to = gen6_ring_sync;
                                 engine->semaphore.signal = gen6_signal;
                                 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
@@ -2948,7 +2940,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
                 engine->add_request = i9xx_add_request;
                 engine->get_seqno = ring_get_seqno;
                 engine->set_seqno = ring_set_seqno;
-                if (IS_GEN5(dev)) {
+                if (IS_GEN5(dev_priv)) {
                         engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
                         engine->irq_get = gen5_ring_get_irq;
                         engine->irq_put = gen5_ring_put_irq;
@@ -2990,7 +2982,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
         engine->irq_put = gen8_ring_put_irq;
         engine->dispatch_execbuffer =
                 gen8_ring_dispatch_execbuffer;
-        if (i915_semaphore_is_enabled(dev)) {
+        if (i915_semaphore_is_enabled(dev_priv)) {
                 engine->semaphore.sync_to = gen8_ring_sync;
                 engine->semaphore.signal = gen8_xcs_signal;
                 GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3017,13 +3009,13 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
         engine->irq_seqno_barrier = gen6_seqno_barrier;
         engine->get_seqno = ring_get_seqno;
         engine->set_seqno = ring_set_seqno;
-        if (INTEL_INFO(dev)->gen >= 8) {
+        if (INTEL_GEN(dev_priv) >= 8) {
                 engine->irq_enable_mask =
                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
                 engine->irq_get = gen8_ring_get_irq;
                 engine->irq_put = gen8_ring_put_irq;
                 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-                if (i915_semaphore_is_enabled(dev)) {
+                if (i915_semaphore_is_enabled(dev_priv)) {
                         engine->semaphore.sync_to = gen8_ring_sync;
                         engine->semaphore.signal = gen8_xcs_signal;
                         GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3033,7 +3025,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
                 engine->irq_get = gen6_ring_get_irq;
                 engine->irq_put = gen6_ring_put_irq;
                 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-                if (i915_semaphore_is_enabled(dev)) {
+                if (i915_semaphore_is_enabled(dev_priv)) {
                         engine->semaphore.signal = gen6_signal;
                         engine->semaphore.sync_to = gen6_ring_sync;
                         /*
@@ -3078,13 +3070,13 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
         engine->get_seqno = ring_get_seqno;
         engine->set_seqno = ring_set_seqno;
 
-        if (INTEL_INFO(dev)->gen >= 8) {
+        if (INTEL_GEN(dev_priv) >= 8) {
                 engine->irq_enable_mask =
                         GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
                 engine->irq_get = gen8_ring_get_irq;
                 engine->irq_put = gen8_ring_put_irq;
                 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-                if (i915_semaphore_is_enabled(dev)) {
+                if (i915_semaphore_is_enabled(dev_priv)) {
                         engine->semaphore.sync_to = gen8_ring_sync;
                         engine->semaphore.signal = gen8_xcs_signal;
                         GEN8_RING_SEMAPHORE_INIT(engine);
@@ -3094,7 +3086,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
                 engine->irq_get = hsw_vebox_get_irq;
                 engine->irq_put = hsw_vebox_put_irq;
                 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-                if (i915_semaphore_is_enabled(dev)) {
+                if (i915_semaphore_is_enabled(dev_priv)) {
                         engine->semaphore.sync_to = gen6_ring_sync;
                         engine->semaphore.signal = gen6_signal;
                         engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
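The intel_ring_alloc_request_extras() hunk above replaces the old reserve/cancel/use/end API with a single per-request counter: every wait for ring space asks for the caller's bytes plus the request's reserved_space, so the trailing add-request commands can never be starved. A compilable sketch of that invariant under the assumption of a simple byte-counting ring (the sketch_* names are hypothetical, not driver symbols):

#include <stdio.h>

struct sketch_ring {
        unsigned int space;             /* bytes currently free */
};

struct sketch_request {
        struct sketch_ring *ring;
        unsigned int reserved_space;    /* bytes held back for the tail commands */
};

static int sketch_ring_begin(struct sketch_request *rq, unsigned int bytes)
{
        unsigned int total = bytes + rq->reserved_space;

        if (rq->ring->space < total)
                return -1;      /* the real code would wait for retirement here */
        rq->ring->space -= bytes;
        return 0;
}

int main(void)
{
        struct sketch_ring ring = { .space = 4096 };
        struct sketch_request rq = { .ring = &ring, .reserved_space = 0 };

        rq.reserved_space += 336;       /* cf. MIN_SPACE_FOR_ADD_REQUEST */
        if (sketch_ring_begin(&rq, 0))  /* ensure the reservation fits up front */
                return 1;
        rq.reserved_space -= 336;       /* tail emission may now consume it */
        printf("space left: %u\n", ring.space);
        return 0;
}

The key design point the diff exploits: because the reservation rides along in every space check, the separate use/cancel/end bookkeeping (and its GEM_BUG_ON pairs) becomes unnecessary.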
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ff126485d398..b33c876fed20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -107,7 +107,6 @@ struct intel_ringbuffer {
         int space;
         int size;
         int effective_size;
-        int reserved_size;
 
         /** We track the position of the requests in the ring buffer, and
          * when each is retired we increment last_retired_head as the GPU
@@ -120,7 +119,7 @@ struct intel_ringbuffer {
         u32 last_retired_head;
 };
 
-struct intel_context;
+struct i915_gem_context;
 struct drm_i915_reg_table;
 
 /*
@@ -142,7 +141,8 @@ struct i915_ctx_workarounds {
         struct drm_i915_gem_object *obj;
 };
 
 struct intel_engine_cs {
+        struct drm_i915_private *i915;
         const char *name;
         enum intel_engine_id {
                 RCS = 0,
@@ -157,7 +157,6 @@ struct intel_engine_cs {
         unsigned int hw_id;
         unsigned int guc_id; /* XXX same as hw_id? */
         u32 mmio_base;
-        struct drm_device *dev;
         struct intel_ringbuffer *buffer;
         struct list_head buffers;
 
@@ -268,7 +267,6 @@ struct intel_engine_cs {
         struct tasklet_struct irq_tasklet;
         spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
         struct list_head execlist_queue;
-        struct list_head execlist_retired_req_list;
         unsigned int fw_domains;
         unsigned int next_context_status_buffer;
         unsigned int idle_lite_restore_wa;
@@ -312,7 +310,7 @@ struct intel_engine_cs {
 
         wait_queue_head_t irq_queue;
 
-        struct intel_context *last_context;
+        struct i915_gem_context *last_context;
 
         struct intel_ring_hangcheck hangcheck;
 
@@ -352,7 +350,7 @@ struct intel_engine_cs {
 static inline bool
 intel_engine_initialized(struct intel_engine_cs *engine)
 {
-        return engine->dev != NULL;
+        return engine->i915 != NULL;
 }
 
 static inline unsigned
@@ -427,7 +425,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
 
 struct intel_ringbuffer *
 intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
                                      struct intel_ringbuffer *ringbuf);
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -486,26 +484,15 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 /*
  * Arbitrary size for largest possible 'add request' sequence. The code paths
  * are complex and variable. Empirical measurement shows that the worst case
- * is ILK at 136 words. Reserving too much is better than reserving too little
- * as that allows for corner cases that might have been missed. So the figure
- * has been rounded up to 160 words.
+ * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
+ * we need to allocate double the largest single packet within that emission
+ * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
  */
-#define MIN_SPACE_FOR_ADD_REQUEST 160
+#define MIN_SPACE_FOR_ADD_REQUEST 336
 
-/*
- * Reserve space in the ring to guarantee that the i915_add_request() call
- * will always have sufficient room to do its stuff. The request creation
- * code calls this automatically.
- */
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
-/* Cancel the reservation, e.g. because the request is being discarded. */
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
-/* Use the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
-/* Finish with the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
-
-/* Legacy ringbuffer specific portion of reservation code: */
-int intel_ring_reserve_space(struct drm_i915_gem_request *request);
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
+{
+        return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
 
 #endif /* _INTEL_RINGBUFFER_H_ */
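The new MIN_SPACE_FOR_ADD_REQUEST comment above carries arithmetic that is easy to verify: with 4-byte dwords, 6 + 6 + 36 dwords is 192 bytes, and doubling the largest single packet (36 dwords) to cover a tail-wraparound re-emission gives 6 + 6 + 72 = 84 dwords, i.e. 336 bytes. A trivial standalone check:

#include <assert.h>

int main(void)
{
        int worst_case_dwords = 6 + 6 + 36;     /* BDW add-request: 48 dwords */
        int wrap_safe_dwords  = 6 + 6 + 2 * 36; /* largest packet doubled: 84 */

        assert(worst_case_dwords * 4 == 192);   /* bytes quoted in the comment */
        assert(wrap_safe_dwords * 4 == 336);    /* MIN_SPACE_FOR_ADD_REQUEST */
        return 0;
}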
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7fb1da4e7fc3..fe8faf30bda7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -806,15 +806,27 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
         return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
 }
 
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+        u32 tmp = I915_READ(DBUF_CTL);
+
+        WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
+             (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
+             "Unexpected DBuf power state (0x%08x)\n", tmp);
+}
+
 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
-        if (IS_BROXTON(dev_priv)) {
-                broxton_cdclk_verify_state(dev_priv);
+        WARN_ON(dev_priv->cdclk_freq !=
+                dev_priv->display.get_display_clock_speed(dev_priv->dev));
+
+        gen9_assert_dbuf_enabled(dev_priv);
+
+        if (IS_BROXTON(dev_priv))
                 broxton_ddi_phy_verify_state(dev_priv);
-        }
 }
 
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -948,6 +960,11 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
          */
         I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
         I915_WRITE(CBR1_VLV, 0);
+
+        WARN_ON(dev_priv->rawclk_freq == 0);
+
+        I915_WRITE(RAWCLK_FREQ_VLV,
+                   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
 }
 
 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
@@ -2171,6 +2188,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
         mutex_unlock(&power_domains->lock);
 }
 
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+        POSTING_READ(DBUF_CTL);
+
+        udelay(10);
+
+        if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+                DRM_ERROR("DBuf power enable timeout\n");
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+        POSTING_READ(DBUF_CTL);
+
+        udelay(10);
+
+        if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+                DRM_ERROR("DBuf power disable timeout!\n");
+}
+
 static void skl_display_core_init(struct drm_i915_private *dev_priv,
                                   bool resume)
 {
@@ -2195,12 +2234,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
 
         mutex_unlock(&power_domains->lock);
 
-        if (!resume)
-                return;
-
         skl_init_cdclk(dev_priv);
 
-        if (dev_priv->csr.dmc_payload)
+        gen9_dbuf_enable(dev_priv);
+
+        if (resume && dev_priv->csr.dmc_payload)
                 intel_csr_load_program(dev_priv);
 }
 
@@ -2211,6 +2249,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
 
         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
+        gen9_dbuf_disable(dev_priv);
+
         skl_uninit_cdclk(dev_priv);
 
         /* The spec doesn't call for removing the reset handshake flag */
@@ -2255,9 +2295,11 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
         mutex_unlock(&power_domains->lock);
 
         broxton_init_cdclk(dev_priv);
+
+        gen9_dbuf_enable(dev_priv);
+
         broxton_ddi_phy_init(dev_priv);
 
-        broxton_cdclk_verify_state(dev_priv);
         broxton_ddi_phy_verify_state(dev_priv);
 
         if (resume && dev_priv->csr.dmc_payload)
@@ -2272,6 +2314,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
         broxton_ddi_phy_uninit(dev_priv);
+
+        gen9_dbuf_disable(dev_priv);
+
         broxton_uninit_cdclk(dev_priv);
 
         /* The spec doesn't call for removing the reset handshake flag */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2128fae5687d..ab2d0658abe6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2191,7 +2191,6 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2191static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { 2191static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
2192 .get_modes = intel_sdvo_get_modes, 2192 .get_modes = intel_sdvo_get_modes,
2193 .mode_valid = intel_sdvo_mode_valid, 2193 .mode_valid = intel_sdvo_mode_valid,
2194 .best_encoder = intel_best_encoder,
2195}; 2194};
2196 2195
2197static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) 2196static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -2981,7 +2980,7 @@ bool intel_sdvo_init(struct drm_device *dev,
2981 intel_encoder = &intel_sdvo->base; 2980 intel_encoder = &intel_sdvo->base;
2982 intel_encoder->type = INTEL_OUTPUT_SDVO; 2981 intel_encoder->type = INTEL_OUTPUT_SDVO;
2983 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, 2982 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
2984 NULL); 2983 "SDVO %c", port_name(port));
2985 2984
2986 /* Read the regs to test if we can talk to the device */ 2985 /* Read the regs to test if we can talk to the device */
2987 for (i = 0; i < 0x40; i++) { 2986 for (i = 0; i < 0x40; i++) {
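The drm_encoder_init() call above now passes a printf-style name ("SDVO %c", port_name(port)) where it used to pass NULL; this series threads a format string and varargs through the DRM init helpers so debug output can name objects. A sketch of how such a variadic naming tail can be implemented (the struct and helper are invented for illustration):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_encoder {
	char *name;	/* stand-in for drm_encoder::name */
};

/* Invented helper mirroring the fmt/... tail added to drm_encoder_init(). */
static int encoder_set_name(struct fake_encoder *enc, const char *fmt, ...)
{
	va_list ap;
	char buf[64];

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	enc->name = strdup(buf);
	return enc->name ? 0 : -1;
}

int main(void)
{
	struct fake_encoder enc;

	encoder_set_name(&enc, "SDVO %c", 'B');
	printf("encoder name: %s\n", enc.name);
	free(enc.name);
	return 0;
}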
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0f3e2303e0e9..324ccb06397d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
80 */ 80 */
81void intel_pipe_update_start(struct intel_crtc *crtc) 81void intel_pipe_update_start(struct intel_crtc *crtc)
82{ 82{
83 struct drm_device *dev = crtc->base.dev;
84 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 83 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
85 enum pipe pipe = crtc->pipe;
86 long timeout = msecs_to_jiffies_timeout(1); 84 long timeout = msecs_to_jiffies_timeout(1);
87 int scanline, min, max, vblank_start; 85 int scanline, min, max, vblank_start;
88 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 86 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
139 137
140 crtc->debug.scanline_start = scanline; 138 crtc->debug.scanline_start = scanline;
141 crtc->debug.start_vbl_time = ktime_get(); 139 crtc->debug.start_vbl_time = ktime_get();
142 crtc->debug.start_vbl_count = 140 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
143 dev->driver->get_vblank_counter(dev, pipe);
144 141
145 trace_i915_pipe_update_vblank_evaded(crtc); 142 trace_i915_pipe_update_vblank_evaded(crtc);
146} 143}
@@ -154,14 +151,19 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
154 * re-enables interrupts and verifies the update was actually completed 151 * re-enables interrupts and verifies the update was actually completed
155 * before a vblank using the value of @start_vbl_count. 152 * before a vblank using the value of @start_vbl_count.
156 */ 153 */
157void intel_pipe_update_end(struct intel_crtc *crtc) 154void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
158{ 155{
159 struct drm_device *dev = crtc->base.dev;
160 enum pipe pipe = crtc->pipe; 156 enum pipe pipe = crtc->pipe;
161 int scanline_end = intel_get_crtc_scanline(crtc); 157 int scanline_end = intel_get_crtc_scanline(crtc);
162 u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); 158 u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
163 ktime_t end_vbl_time = ktime_get(); 159 ktime_t end_vbl_time = ktime_get();
164 160
161 if (work) {
162 work->flip_queued_vblank = end_vbl_count;
163 smp_mb__before_atomic();
164 atomic_set(&work->pending, 1);
165 }
166
165 trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); 167 trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
166 168
167 local_irq_enable(); 169 local_irq_enable();
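The work->pending publication above pairs smp_mb__before_atomic() with atomic_set() so that flip_queued_vblank is written before the pending flag becomes visible to other CPUs. As a portable analogy (not the kernel primitives), the same publish/consume shape in C11 is a release store paired with an acquire load:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct flip_work {
	uint32_t flip_queued_vblank;	/* plain payload */
	atomic_int pending;		/* publication flag */
};

static void publish(struct flip_work *work, uint32_t vbl)
{
	work->flip_queued_vblank = vbl;
	/* Release: the payload write cannot be reordered after the flag. */
	atomic_store_explicit(&work->pending, 1, memory_order_release);
}

static int consume(struct flip_work *work, uint32_t *vbl)
{
	/* Acquire: if we observe the flag, we also observe the payload. */
	if (!atomic_load_explicit(&work->pending, memory_order_acquire))
		return 0;
	*vbl = work->flip_queued_vblank;
	return 1;
}

int main(void)
{
	struct flip_work w = { .pending = 0 };
	uint32_t vbl;

	publish(&w, 1234);
	if (consume(&w, &vbl))
		printf("flip queued at vblank %u\n", vbl);
	return 0;
}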
@@ -203,8 +205,6 @@ skl_update_plane(struct drm_plane *drm_plane,
203 uint32_t y = plane_state->src.y1 >> 16; 205 uint32_t y = plane_state->src.y1 >> 16;
204 uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; 206 uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
205 uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; 207 uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
206 const struct intel_scaler *scaler =
207 &crtc_state->scaler_state.scalers[plane_state->scaler_id];
208 208
209 plane_ctl = PLANE_CTL_ENABLE | 209 plane_ctl = PLANE_CTL_ENABLE |
210 PLANE_CTL_PIPE_GAMMA_ENABLE | 210 PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -260,13 +260,16 @@ skl_update_plane(struct drm_plane *drm_plane,
260 260
261 /* program plane scaler */ 261 /* program plane scaler */
262 if (plane_state->scaler_id >= 0) { 262 if (plane_state->scaler_id >= 0) {
263 uint32_t ps_ctrl = 0;
264 int scaler_id = plane_state->scaler_id; 263 int scaler_id = plane_state->scaler_id;
264 const struct intel_scaler *scaler;
265 265
266 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, 266 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
267 PS_PLANE_SEL(plane)); 267 PS_PLANE_SEL(plane));
268 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode; 268
269 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 269 scaler = &crtc_state->scaler_state.scalers[scaler_id];
270
271 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
272 PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
270 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 273 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
271 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); 274 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
272 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), 275 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
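The scaler hunk above fixes a use-before-validation bug: the old code indexed scaler_state.scalers[] with plane_state->scaler_id at the top of the function, before the scaler_id >= 0 check, and then read scaler->mode through the possibly bogus pointer. The fix defers the lookup until after the guard. The same pattern in miniature:

#include <stdio.h>

struct scaler { unsigned int mode; };

static unsigned int
build_ctl(const struct scaler *scalers, int scaler_id)
{
	/* Wrong: scalers[scaler_id] before validating scaler_id (may be -1).
	 * Right: validate first, then index.
	 */
	if (scaler_id < 0)
		return 0;	/* no scaling requested */

	return 0x80000000u | scalers[scaler_id].mode;
}

int main(void)
{
	struct scaler scalers[2] = { { .mode = 1 }, { .mode = 2 } };

	printf("ctl(no scaler) = 0x%08x\n", build_ctl(scalers, -1));
	printf("ctl(scaler 1)  = 0x%08x\n", build_ctl(scalers, 1));
	return 0;
}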
@@ -1111,10 +1114,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1111 1114
1112 possible_crtcs = (1 << pipe); 1115 possible_crtcs = (1 << pipe);
1113 1116
1114 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, 1117 if (INTEL_INFO(dev)->gen >= 9)
1115 &intel_plane_funcs, 1118 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
1116 plane_formats, num_plane_formats, 1119 &intel_plane_funcs,
1117 DRM_PLANE_TYPE_OVERLAY, NULL); 1120 plane_formats, num_plane_formats,
1121 DRM_PLANE_TYPE_OVERLAY,
1122 "plane %d%c", plane + 2, pipe_name(pipe));
1123 else
1124 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
1125 &intel_plane_funcs,
1126 plane_formats, num_plane_formats,
1127 DRM_PLANE_TYPE_OVERLAY,
1128 "sprite %c", sprite_name(pipe, plane));
1118 if (ret) 1129 if (ret)
1119 goto fail; 1130 goto fail;
1120 1131
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 223129d3c765..7ac9e9b0e2c3 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1512,7 +1512,6 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
1512static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { 1512static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
1513 .mode_valid = intel_tv_mode_valid, 1513 .mode_valid = intel_tv_mode_valid,
1514 .get_modes = intel_tv_get_modes, 1514 .get_modes = intel_tv_get_modes,
1515 .best_encoder = intel_best_encoder,
1516}; 1515};
1517 1516
1518static const struct drm_encoder_funcs intel_tv_enc_funcs = { 1517static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1591,7 +1590,7 @@ intel_tv_init(struct drm_device *dev)
1591 DRM_MODE_CONNECTOR_SVIDEO); 1590 DRM_MODE_CONNECTOR_SVIDEO);
1592 1591
1593 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, 1592 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
1594 DRM_MODE_ENCODER_TVDAC, NULL); 1593 DRM_MODE_ENCODER_TVDAC, "TV");
1595 1594
1596 intel_encoder->compute_config = intel_tv_compute_config; 1595 intel_encoder->compute_config = intel_tv_compute_config;
1597 intel_encoder->get_config = intel_tv_get_config; 1596 intel_encoder->get_config = intel_tv_get_config;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f1dfe616856..c1ca458d688e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
248 return HRTIMER_NORESTART; 248 return HRTIMER_NORESTART;
249} 249}
250 250
251void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) 251void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
252 bool restore)
252{ 253{
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 unsigned long irqflags; 254 unsigned long irqflags;
255 struct intel_uncore_forcewake_domain *domain; 255 struct intel_uncore_forcewake_domain *domain;
256 int retry_count = 100; 256 int retry_count = 100;
@@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
304 if (fw) 304 if (fw)
305 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); 305 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
306 306
307 if (IS_GEN6(dev) || IS_GEN7(dev)) 307 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
308 dev_priv->uncore.fifo_count = 308 dev_priv->uncore.fifo_count =
309 fifo_free_entries(dev_priv); 309 fifo_free_entries(dev_priv);
310 } 310 }
@@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
400 return false; 400 return false;
401} 401}
402 402
403static void __intel_uncore_early_sanitize(struct drm_device *dev, 403static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
404 bool restore_forcewake) 404 bool restore_forcewake)
405{ 405{
406 struct drm_i915_private *dev_priv = dev->dev_private;
407
408 /* clear out unclaimed reg detection bit */ 406 /* clear out unclaimed reg detection bit */
409 if (check_for_unclaimed_mmio(dev_priv)) 407 if (check_for_unclaimed_mmio(dev_priv))
410 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); 408 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
411 409
412 /* clear out old GT FIFO errors */ 410 /* clear out old GT FIFO errors */
413 if (IS_GEN6(dev) || IS_GEN7(dev)) 411 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
414 __raw_i915_write32(dev_priv, GTFIFODBG, 412 __raw_i915_write32(dev_priv, GTFIFODBG,
415 __raw_i915_read32(dev_priv, GTFIFODBG)); 413 __raw_i915_read32(dev_priv, GTFIFODBG));
416 414
417 /* WaDisableShadowRegForCpd:chv */ 415 /* WaDisableShadowRegForCpd:chv */
418 if (IS_CHERRYVIEW(dev)) { 416 if (IS_CHERRYVIEW(dev_priv)) {
419 __raw_i915_write32(dev_priv, GTFIFOCTL, 417 __raw_i915_write32(dev_priv, GTFIFOCTL,
420 __raw_i915_read32(dev_priv, GTFIFOCTL) | 418 __raw_i915_read32(dev_priv, GTFIFOCTL) |
421 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | 419 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
422 GT_FIFO_CTL_RC6_POLICY_STALL); 420 GT_FIFO_CTL_RC6_POLICY_STALL);
423 } 421 }
424 422
425 intel_uncore_forcewake_reset(dev, restore_forcewake); 423 intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
426} 424}
427 425
428void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 426void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
427 bool restore_forcewake)
429{ 428{
430 __intel_uncore_early_sanitize(dev, restore_forcewake); 429 __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
431 i915_check_and_clear_faults(dev); 430 i915_check_and_clear_faults(dev_priv);
432} 431}
433 432
434void intel_uncore_sanitize(struct drm_device *dev) 433void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
435{ 434{
436 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); 435 i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
437 436
438 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 437 /* BIOS often leaves RC6 enabled, but disable it for hw init */
439 intel_disable_gt_powersave(dev); 438 intel_disable_gt_powersave(dev_priv);
440} 439}
441 440
442static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 441static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
1233 fw_domain_reset(d); 1232 fw_domain_reset(d);
1234} 1233}
1235 1234
1236static void intel_uncore_fw_domains_init(struct drm_device *dev) 1235static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1237{ 1236{
1238 struct drm_i915_private *dev_priv = dev->dev_private;
1239
1240 if (INTEL_INFO(dev_priv)->gen <= 5) 1237 if (INTEL_INFO(dev_priv)->gen <= 5)
1241 return; 1238 return;
1242 1239
1243 if (IS_GEN9(dev)) { 1240 if (IS_GEN9(dev_priv)) {
1244 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1241 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1245 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1242 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1246 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1243 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1251 FORCEWAKE_ACK_BLITTER_GEN9); 1248 FORCEWAKE_ACK_BLITTER_GEN9);
1252 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1249 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1253 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); 1250 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1254 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1251 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1255 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1252 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1256 if (!IS_CHERRYVIEW(dev)) 1253 if (!IS_CHERRYVIEW(dev_priv))
1257 dev_priv->uncore.funcs.force_wake_put = 1254 dev_priv->uncore.funcs.force_wake_put =
1258 fw_domains_put_with_fifo; 1255 fw_domains_put_with_fifo;
1259 else 1256 else
@@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1262 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); 1259 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1263 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1260 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1264 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); 1261 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1265 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1262 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1266 dev_priv->uncore.funcs.force_wake_get = 1263 dev_priv->uncore.funcs.force_wake_get =
1267 fw_domains_get_with_thread_status; 1264 fw_domains_get_with_thread_status;
1268 if (IS_HASWELL(dev)) 1265 if (IS_HASWELL(dev_priv))
1269 dev_priv->uncore.funcs.force_wake_put = 1266 dev_priv->uncore.funcs.force_wake_put =
1270 fw_domains_put_with_fifo; 1267 fw_domains_put_with_fifo;
1271 else 1268 else
1272 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1269 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1273 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1270 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1274 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1271 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1275 } else if (IS_IVYBRIDGE(dev)) { 1272 } else if (IS_IVYBRIDGE(dev_priv)) {
1276 u32 ecobus; 1273 u32 ecobus;
1277 1274
1278 /* IVB configs may use multi-threaded forcewake */ 1275 /* IVB configs may use multi-threaded forcewake */
@@ -1302,11 +1299,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1302 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1299 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1303 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1300 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1304 1301
1305 mutex_lock(&dev->struct_mutex);
1306 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); 1302 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
1307 ecobus = __raw_i915_read32(dev_priv, ECOBUS); 1303 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1308 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); 1304 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
1309 mutex_unlock(&dev->struct_mutex);
1310 1305
1311 if (!(ecobus & FORCEWAKE_MT_ENABLE)) { 1306 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1312 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); 1307 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1314,7 +1309,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1314 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1309 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1315 FORCEWAKE, FORCEWAKE_ACK); 1310 FORCEWAKE, FORCEWAKE_ACK);
1316 } 1311 }
1317 } else if (IS_GEN6(dev)) { 1312 } else if (IS_GEN6(dev_priv)) {
1318 dev_priv->uncore.funcs.force_wake_get = 1313 dev_priv->uncore.funcs.force_wake_get =
1319 fw_domains_get_with_thread_status; 1314 fw_domains_get_with_thread_status;
1320 dev_priv->uncore.funcs.force_wake_put = 1315 dev_priv->uncore.funcs.force_wake_put =
@@ -1327,26 +1322,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1327 WARN_ON(dev_priv->uncore.fw_domains == 0); 1322 WARN_ON(dev_priv->uncore.fw_domains == 0);
1328} 1323}
1329 1324
1330void intel_uncore_init(struct drm_device *dev) 1325void intel_uncore_init(struct drm_i915_private *dev_priv)
1331{ 1326{
1332 struct drm_i915_private *dev_priv = dev->dev_private; 1327 i915_check_vgpu(dev_priv);
1333
1334 i915_check_vgpu(dev);
1335 1328
1336 intel_uncore_edram_detect(dev_priv); 1329 intel_uncore_edram_detect(dev_priv);
1337 intel_uncore_fw_domains_init(dev); 1330 intel_uncore_fw_domains_init(dev_priv);
1338 __intel_uncore_early_sanitize(dev, false); 1331 __intel_uncore_early_sanitize(dev_priv, false);
1339 1332
1340 dev_priv->uncore.unclaimed_mmio_check = 1; 1333 dev_priv->uncore.unclaimed_mmio_check = 1;
1341 1334
1342 switch (INTEL_INFO(dev)->gen) { 1335 switch (INTEL_INFO(dev_priv)->gen) {
1343 default: 1336 default:
1344 case 9: 1337 case 9:
1345 ASSIGN_WRITE_MMIO_VFUNCS(gen9); 1338 ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1346 ASSIGN_READ_MMIO_VFUNCS(gen9); 1339 ASSIGN_READ_MMIO_VFUNCS(gen9);
1347 break; 1340 break;
1348 case 8: 1341 case 8:
1349 if (IS_CHERRYVIEW(dev)) { 1342 if (IS_CHERRYVIEW(dev_priv)) {
1350 ASSIGN_WRITE_MMIO_VFUNCS(chv); 1343 ASSIGN_WRITE_MMIO_VFUNCS(chv);
1351 ASSIGN_READ_MMIO_VFUNCS(chv); 1344 ASSIGN_READ_MMIO_VFUNCS(chv);
1352 1345
@@ -1357,13 +1350,13 @@ void intel_uncore_init(struct drm_device *dev)
1357 break; 1350 break;
1358 case 7: 1351 case 7:
1359 case 6: 1352 case 6:
1360 if (IS_HASWELL(dev)) { 1353 if (IS_HASWELL(dev_priv)) {
1361 ASSIGN_WRITE_MMIO_VFUNCS(hsw); 1354 ASSIGN_WRITE_MMIO_VFUNCS(hsw);
1362 } else { 1355 } else {
1363 ASSIGN_WRITE_MMIO_VFUNCS(gen6); 1356 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
1364 } 1357 }
1365 1358
1366 if (IS_VALLEYVIEW(dev)) { 1359 if (IS_VALLEYVIEW(dev_priv)) {
1367 ASSIGN_READ_MMIO_VFUNCS(vlv); 1360 ASSIGN_READ_MMIO_VFUNCS(vlv);
1368 } else { 1361 } else {
1369 ASSIGN_READ_MMIO_VFUNCS(gen6); 1362 ASSIGN_READ_MMIO_VFUNCS(gen6);
@@ -1381,24 +1374,24 @@ void intel_uncore_init(struct drm_device *dev)
1381 break; 1374 break;
1382 } 1375 }
1383 1376
1384 if (intel_vgpu_active(dev)) { 1377 if (intel_vgpu_active(dev_priv)) {
1385 ASSIGN_WRITE_MMIO_VFUNCS(vgpu); 1378 ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1386 ASSIGN_READ_MMIO_VFUNCS(vgpu); 1379 ASSIGN_READ_MMIO_VFUNCS(vgpu);
1387 } 1380 }
1388 1381
1389 i915_check_and_clear_faults(dev); 1382 i915_check_and_clear_faults(dev_priv);
1390} 1383}
1391#undef ASSIGN_WRITE_MMIO_VFUNCS 1384#undef ASSIGN_WRITE_MMIO_VFUNCS
1392#undef ASSIGN_READ_MMIO_VFUNCS 1385#undef ASSIGN_READ_MMIO_VFUNCS
1393 1386
1394void intel_uncore_fini(struct drm_device *dev) 1387void intel_uncore_fini(struct drm_i915_private *dev_priv)
1395{ 1388{
1396 /* Paranoia: make sure we have disabled everything before we exit. */ 1389 /* Paranoia: make sure we have disabled everything before we exit. */
1397 intel_uncore_sanitize(dev); 1390 intel_uncore_sanitize(dev_priv);
1398 intel_uncore_forcewake_reset(dev, false); 1391 intel_uncore_forcewake_reset(dev_priv, false);
1399} 1392}
1400 1393
1401#define GEN_RANGE(l, h) GENMASK(h, l) 1394#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
1402 1395
1403static const struct register_whitelist { 1396static const struct register_whitelist {
1404 i915_reg_t offset_ldw, offset_udw; 1397 i915_reg_t offset_ldw, offset_udw;
@@ -1423,7 +1416,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1423 1416
1424 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1417 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1425 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && 1418 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1426 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1419 (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
1427 break; 1420 break;
1428 } 1421 }
1429 1422
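GEN_RANGE() above shifts from GENMASK(h, l) to GENMASK((h) - 1, (l) - 1) because the whitelist test moved from computing 1 << gen at run time to the precomputed gen_mask, in which generation N occupies bit N - 1. A standalone check that the two line up:

#include <stdint.h>
#include <stdio.h>

/* Kernel-style GENMASK for 32-bit values. */
#define GENMASK(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))
/* Generation N lives at bit N - 1 in gen_mask. */
#define GEN_RANGE(l, h)	GENMASK((h) - 1, (l) - 1)

int main(void)
{
	uint32_t bitmask = GEN_RANGE(4, 9);	/* gens 4..9 inclusive */
	int gen;

	for (gen = 2; gen <= 10; gen++) {
		uint32_t gen_mask = 1u << (gen - 1);

		printf("gen %2d: %s\n", gen,
		       (gen_mask & bitmask) ? "whitelisted" : "no");
	}
	return 0;
}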
@@ -1467,83 +1460,47 @@ out:
1467 return ret; 1460 return ret;
1468} 1461}
1469 1462
1470int i915_get_reset_stats_ioctl(struct drm_device *dev, 1463static int i915_reset_complete(struct pci_dev *pdev)
1471 void *data, struct drm_file *file)
1472{
1473 struct drm_i915_private *dev_priv = dev->dev_private;
1474 struct drm_i915_reset_stats *args = data;
1475 struct i915_ctx_hang_stats *hs;
1476 struct intel_context *ctx;
1477 int ret;
1478
1479 if (args->flags || args->pad)
1480 return -EINVAL;
1481
1482 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1483 return -EPERM;
1484
1485 ret = mutex_lock_interruptible(&dev->struct_mutex);
1486 if (ret)
1487 return ret;
1488
1489 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
1490 if (IS_ERR(ctx)) {
1491 mutex_unlock(&dev->struct_mutex);
1492 return PTR_ERR(ctx);
1493 }
1494 hs = &ctx->hang_stats;
1495
1496 if (capable(CAP_SYS_ADMIN))
1497 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1498 else
1499 args->reset_count = 0;
1500
1501 args->batch_active = hs->batch_active;
1502 args->batch_pending = hs->batch_pending;
1503
1504 mutex_unlock(&dev->struct_mutex);
1505
1506 return 0;
1507}
1508
1509static int i915_reset_complete(struct drm_device *dev)
1510{ 1464{
1511 u8 gdrst; 1465 u8 gdrst;
1512 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1466 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1513 return (gdrst & GRDOM_RESET_STATUS) == 0; 1467 return (gdrst & GRDOM_RESET_STATUS) == 0;
1514} 1468}
1515 1469
1516static int i915_do_reset(struct drm_device *dev, unsigned engine_mask) 1470static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1517{ 1471{
1472 struct pci_dev *pdev = dev_priv->dev->pdev;
1473
1518 /* assert reset for at least 20 usec */ 1474 /* assert reset for at least 20 usec */
1519 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1475 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1520 udelay(20); 1476 udelay(20);
1521 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1477 pci_write_config_byte(pdev, I915_GDRST, 0);
1522 1478
1523 return wait_for(i915_reset_complete(dev), 500); 1479 return wait_for(i915_reset_complete(pdev), 500);
1524} 1480}
1525 1481
1526static int g4x_reset_complete(struct drm_device *dev) 1482static int g4x_reset_complete(struct pci_dev *pdev)
1527{ 1483{
1528 u8 gdrst; 1484 u8 gdrst;
1529 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1485 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1530 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1486 return (gdrst & GRDOM_RESET_ENABLE) == 0;
1531} 1487}
1532 1488
1533static int g33_do_reset(struct drm_device *dev, unsigned engine_mask) 1489static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1534{ 1490{
1535 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1491 struct pci_dev *pdev = dev_priv->dev->pdev;
1536 return wait_for(g4x_reset_complete(dev), 500); 1492 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1493 return wait_for(g4x_reset_complete(pdev), 500);
1537} 1494}
1538 1495
1539static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) 1496static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1540{ 1497{
1541 struct drm_i915_private *dev_priv = dev->dev_private; 1498 struct pci_dev *pdev = dev_priv->dev->pdev;
1542 int ret; 1499 int ret;
1543 1500
1544 pci_write_config_byte(dev->pdev, I915_GDRST, 1501 pci_write_config_byte(pdev, I915_GDRST,
1545 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1502 GRDOM_RENDER | GRDOM_RESET_ENABLE);
1546 ret = wait_for(g4x_reset_complete(dev), 500); 1503 ret = wait_for(g4x_reset_complete(pdev), 500);
1547 if (ret) 1504 if (ret)
1548 return ret; 1505 return ret;
1549 1506
@@ -1551,9 +1508,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
1551 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1508 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1552 POSTING_READ(VDECCLK_GATE_D); 1509 POSTING_READ(VDECCLK_GATE_D);
1553 1510
1554 pci_write_config_byte(dev->pdev, I915_GDRST, 1511 pci_write_config_byte(pdev, I915_GDRST,
1555 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1512 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1556 ret = wait_for(g4x_reset_complete(dev), 500); 1513 ret = wait_for(g4x_reset_complete(pdev), 500);
1557 if (ret) 1514 if (ret)
1558 return ret; 1515 return ret;
1559 1516
@@ -1561,14 +1518,14 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
1561 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1518 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1562 POSTING_READ(VDECCLK_GATE_D); 1519 POSTING_READ(VDECCLK_GATE_D);
1563 1520
1564 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1521 pci_write_config_byte(pdev, I915_GDRST, 0);
1565 1522
1566 return 0; 1523 return 0;
1567} 1524}
1568 1525
1569static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) 1526static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1527 unsigned engine_mask)
1570{ 1528{
1571 struct drm_i915_private *dev_priv = dev->dev_private;
1572 int ret; 1529 int ret;
1573 1530
1574 I915_WRITE(ILK_GDSR, 1531 I915_WRITE(ILK_GDSR,
@@ -1612,7 +1569,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1612 1569
1613/** 1570/**
1614 * gen6_reset_engines - reset individual engines 1571 * gen6_reset_engines - reset individual engines
1615 * @dev: DRM device 1572 * @dev_priv: i915 device
1616 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset 1573 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1617 * 1574 *
1618 * This function will reset the individual engines that are set in engine_mask. 1575 * This function will reset the individual engines that are set in engine_mask.
@@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1623 * 1580 *
1624 * Returns 0 on success, nonzero on error. 1581 * Returns 0 on success, nonzero on error.
1625 */ 1582 */
1626static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) 1583static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1584 unsigned engine_mask)
1627{ 1585{
1628 struct drm_i915_private *dev_priv = dev->dev_private;
1629 struct intel_engine_cs *engine; 1586 struct intel_engine_cs *engine;
1630 const u32 hw_engine_mask[I915_NUM_ENGINES] = { 1587 const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1631 [RCS] = GEN6_GRDOM_RENDER, 1588 [RCS] = GEN6_GRDOM_RENDER,
@@ -1647,7 +1604,7 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
1647 1604
1648 ret = gen6_hw_domain_reset(dev_priv, hw_mask); 1605 ret = gen6_hw_domain_reset(dev_priv, hw_mask);
1649 1606
1650 intel_uncore_forcewake_reset(dev, true); 1607 intel_uncore_forcewake_reset(dev_priv, true);
1651 1608
1652 return ret; 1609 return ret;
1653} 1610}
@@ -1663,8 +1620,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv,
1663 1620
1664static int gen8_request_engine_reset(struct intel_engine_cs *engine) 1621static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1665{ 1622{
1623 struct drm_i915_private *dev_priv = engine->i915;
1666 int ret; 1624 int ret;
1667 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1668 1625
1669 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1626 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1670 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); 1627 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1682,22 +1639,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1682 1639
1683static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) 1640static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1684{ 1641{
1685 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1642 struct drm_i915_private *dev_priv = engine->i915;
1686 1643
1687 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1644 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1688 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); 1645 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1689} 1646}
1690 1647
1691static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask) 1648static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1649 unsigned engine_mask)
1692{ 1650{
1693 struct drm_i915_private *dev_priv = dev->dev_private;
1694 struct intel_engine_cs *engine; 1651 struct intel_engine_cs *engine;
1695 1652
1696 for_each_engine_masked(engine, dev_priv, engine_mask) 1653 for_each_engine_masked(engine, dev_priv, engine_mask)
1697 if (gen8_request_engine_reset(engine)) 1654 if (gen8_request_engine_reset(engine))
1698 goto not_ready; 1655 goto not_ready;
1699 1656
1700 return gen6_reset_engines(dev, engine_mask); 1657 return gen6_reset_engines(dev_priv, engine_mask);
1701 1658
1702not_ready: 1659not_ready:
1703 for_each_engine_masked(engine, dev_priv, engine_mask) 1660 for_each_engine_masked(engine, dev_priv, engine_mask)
@@ -1706,35 +1663,35 @@ not_ready:
1706 return -EIO; 1663 return -EIO;
1707} 1664}
1708 1665
1709static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *, 1666typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1710 unsigned engine_mask) 1667
1668static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1711{ 1669{
1712 if (!i915.reset) 1670 if (!i915.reset)
1713 return NULL; 1671 return NULL;
1714 1672
1715 if (INTEL_INFO(dev)->gen >= 8) 1673 if (INTEL_INFO(dev_priv)->gen >= 8)
1716 return gen8_reset_engines; 1674 return gen8_reset_engines;
1717 else if (INTEL_INFO(dev)->gen >= 6) 1675 else if (INTEL_INFO(dev_priv)->gen >= 6)
1718 return gen6_reset_engines; 1676 return gen6_reset_engines;
1719 else if (IS_GEN5(dev)) 1677 else if (IS_GEN5(dev_priv))
1720 return ironlake_do_reset; 1678 return ironlake_do_reset;
1721 else if (IS_G4X(dev)) 1679 else if (IS_G4X(dev_priv))
1722 return g4x_do_reset; 1680 return g4x_do_reset;
1723 else if (IS_G33(dev)) 1681 else if (IS_G33(dev_priv))
1724 return g33_do_reset; 1682 return g33_do_reset;
1725 else if (INTEL_INFO(dev)->gen >= 3) 1683 else if (INTEL_INFO(dev_priv)->gen >= 3)
1726 return i915_do_reset; 1684 return i915_do_reset;
1727 else 1685 else
1728 return NULL; 1686 return NULL;
1729} 1687}
1730 1688
1731int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) 1689int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1732{ 1690{
1733 struct drm_i915_private *dev_priv = to_i915(dev); 1691 reset_func reset;
1734 int (*reset)(struct drm_device *, unsigned);
1735 int ret; 1692 int ret;
1736 1693
1737 reset = intel_get_gpu_reset(dev); 1694 reset = intel_get_gpu_reset(dev_priv);
1738 if (reset == NULL) 1695 if (reset == NULL)
1739 return -ENODEV; 1696 return -ENODEV;
1740 1697
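The rework above replaces the hard-to-read inline function-pointer return type with a reset_func typedef, so intel_get_gpu_reset() becomes a plain capability dispatcher and intel_gpu_reset()/intel_has_gpu_reset() share one lookup. The select-then-call pattern in miniature, with the platform checks reduced to a bare generation number:

#include <stdio.h>

struct dev_priv { int gen; };

typedef int (*reset_func)(struct dev_priv *, unsigned int engine_mask);

static int gen8_reset(struct dev_priv *d, unsigned int m)
{
	(void)d; (void)m;	/* stub: pretend the reset succeeded */
	return 0;
}

static int gen6_reset(struct dev_priv *d, unsigned int m)
{
	(void)d; (void)m;	/* stub */
	return 0;
}

static reset_func get_gpu_reset(struct dev_priv *d)
{
	if (d->gen >= 8)
		return gen8_reset;
	if (d->gen >= 6)
		return gen6_reset;
	return NULL;	/* no reset support */
}

int main(void)
{
	struct dev_priv d = { .gen = 9 };
	reset_func reset = get_gpu_reset(&d);

	if (!reset)
		return 1;	/* -ENODEV in the driver */
	printf("reset returned %d\n", reset(&d, ~0u));
	return 0;
}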
@@ -1742,15 +1699,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
1742 * request may be dropped and never completes (causing -EIO). 1699 * request may be dropped and never completes (causing -EIO).
1743 */ 1700 */
1744 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1701 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1745 ret = reset(dev, engine_mask); 1702 ret = reset(dev_priv, engine_mask);
1746 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1703 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1747 1704
1748 return ret; 1705 return ret;
1749} 1706}
1750 1707
1751bool intel_has_gpu_reset(struct drm_device *dev) 1708bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
1752{ 1709{
1753 return intel_get_gpu_reset(dev) != NULL; 1710 return intel_get_gpu_reset(dev_priv) != NULL;
1754} 1711}
1755 1712
1756int intel_guc_reset(struct drm_i915_private *dev_priv) 1713int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1758,7 +1715,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv)
1758 int ret; 1715 int ret;
1759 unsigned long irqflags; 1716 unsigned long irqflags;
1760 1717
1761 if (!i915.enable_guc_submission) 1718 if (!HAS_GUC(dev_priv))
1762 return -EINVAL; 1719 return -EINVAL;
1763 1720
1764 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1721 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -1802,10 +1759,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1802{ 1759{
1803 enum forcewake_domains fw_domains; 1760 enum forcewake_domains fw_domains;
1804 1761
1805 if (intel_vgpu_active(dev_priv->dev)) 1762 if (intel_vgpu_active(dev_priv))
1806 return 0; 1763 return 0;
1807 1764
1808 switch (INTEL_INFO(dev_priv)->gen) { 1765 switch (INTEL_GEN(dev_priv)) {
1809 case 9: 1766 case 9:
1810 fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); 1767 fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
1811 break; 1768 break;
@@ -1842,10 +1799,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1842{ 1799{
1843 enum forcewake_domains fw_domains; 1800 enum forcewake_domains fw_domains;
1844 1801
1845 if (intel_vgpu_active(dev_priv->dev)) 1802 if (intel_vgpu_active(dev_priv))
1846 return 0; 1803 return 0;
1847 1804
1848 switch (INTEL_INFO(dev_priv)->gen) { 1805 switch (INTEL_GEN(dev_priv)) {
1849 case 9: 1806 case 9:
1850 fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); 1807 fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
1851 break; 1808 break;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index c15051de8023..68db9621f1f0 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -403,9 +403,10 @@ struct lvds_dvo_timing {
403 u8 vsync_off:4; 403 u8 vsync_off:4;
404 u8 rsvd0:6; 404 u8 rsvd0:6;
405 u8 hsync_off_hi:2; 405 u8 hsync_off_hi:2;
406 u8 h_image; 406 u8 himage_lo;
407 u8 v_image; 407 u8 vimage_lo;
408 u8 max_hv; 408 u8 vimage_hi:4;
409 u8 himage_hi:4;
409 u8 h_border; 410 u8 h_border;
410 u8 v_border; 411 u8 v_border;
411 u8 rsvd1:3; 412 u8 rsvd1:3;
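The VBT fix above replaces the bogus h_image/v_image/max_hv trio with himage_lo/vimage_lo plus a shared byte of 4-bit high nibbles, so each image size is really a 12-bit value. A sketch of decoding such a split field (assuming little-endian bitfield layout, as the packed VBT structs do):

#include <stdint.h>
#include <stdio.h>

struct dvo_timing_tail {
	uint8_t himage_lo;
	uint8_t vimage_lo;
	uint8_t vimage_hi:4;	/* low nibble of the shared byte */
	uint8_t himage_hi:4;	/* high nibble */
} __attribute__((packed));

int main(void)
{
	/* Shared byte 0xAB: himage_hi = 0xA, vimage_hi = 0xB (LE bitfields) */
	struct dvo_timing_tail t = {
		.himage_lo = 0x34, .vimage_lo = 0x78,
		.vimage_hi = 0xB, .himage_hi = 0xA,
	};
	unsigned int h_image = (t.himage_hi << 8) | t.himage_lo;
	unsigned int v_image = (t.vimage_hi << 8) | t.vimage_lo;

	printf("h_image = 0x%03x, v_image = 0x%03x\n", h_image, v_image);
	return 0;
}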
@@ -446,10 +447,16 @@ struct bdb_lfp_backlight_data_entry {
446 u8 obsolete3; 447 u8 obsolete3;
447} __packed; 448} __packed;
448 449
450struct bdb_lfp_backlight_control_method {
451 u8 type:4;
452 u8 controller:4;
453} __packed;
454
449struct bdb_lfp_backlight_data { 455struct bdb_lfp_backlight_data {
450 u8 entry_size; 456 u8 entry_size;
451 struct bdb_lfp_backlight_data_entry data[16]; 457 struct bdb_lfp_backlight_data_entry data[16];
452 u8 level[16]; 458 u8 level[16];
459 struct bdb_lfp_backlight_control_method backlight_control[16];
453} __packed; 460} __packed;
454 461
455struct aimdb_header { 462struct aimdb_header {
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1f14b602882b..82656654fb21 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -97,8 +97,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
97 return NULL; 97 return NULL;
98} 98}
99 99
100int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, 100int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
101 int hsync_pin, int vsync_pin) 101 int hsync_pin, int vsync_pin, u32 bus_flags)
102{ 102{
103 struct imx_drm_crtc_helper_funcs *helper; 103 struct imx_drm_crtc_helper_funcs *helper;
104 struct imx_drm_crtc *imx_crtc; 104 struct imx_drm_crtc *imx_crtc;
@@ -110,14 +110,17 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
110 helper = &imx_crtc->imx_drm_helper_funcs; 110 helper = &imx_crtc->imx_drm_helper_funcs;
111 if (helper->set_interface_pix_fmt) 111 if (helper->set_interface_pix_fmt)
112 return helper->set_interface_pix_fmt(encoder->crtc, 112 return helper->set_interface_pix_fmt(encoder->crtc,
113 bus_format, hsync_pin, vsync_pin); 113 bus_format, hsync_pin, vsync_pin,
114 bus_flags);
114 return 0; 115 return 0;
115} 116}
116EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins); 117EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
117 118
118int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) 119int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
119{ 120{
120 return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3); 121 return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
122 DRM_BUS_FLAG_DE_HIGH |
123 DRM_BUS_FLAG_PIXDATA_NEGEDGE);
121} 124}
122EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); 125EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
123 126
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index b0241b9d1334..74320a1723b7 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -19,7 +19,8 @@ struct imx_drm_crtc_helper_funcs {
19 int (*enable_vblank)(struct drm_crtc *crtc); 19 int (*enable_vblank)(struct drm_crtc *crtc);
20 void (*disable_vblank)(struct drm_crtc *crtc); 20 void (*disable_vblank)(struct drm_crtc *crtc);
21 int (*set_interface_pix_fmt)(struct drm_crtc *crtc, 21 int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
22 u32 bus_format, int hsync_pin, int vsync_pin); 22 u32 bus_format, int hsync_pin, int vsync_pin,
23 u32 bus_flags);
23 const struct drm_crtc_helper_funcs *crtc_helper_funcs; 24 const struct drm_crtc_helper_funcs *crtc_helper_funcs;
24 const struct drm_crtc_funcs *crtc_funcs; 25 const struct drm_crtc_funcs *crtc_funcs;
25}; 26};
@@ -41,8 +42,8 @@ void imx_drm_mode_config_init(struct drm_device *drm);
41 42
42struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); 43struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
43 44
44int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, 45int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
45 u32 bus_format, int hsync_pin, int vsync_pin); 46 int hsync_pin, int vsync_pin, u32 bus_flags);
46int imx_drm_set_bus_format(struct drm_encoder *encoder, 47int imx_drm_set_bus_format(struct drm_encoder *encoder,
47 u32 bus_format); 48 u32 bus_format);
48 49
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index a58eee59550a..beff793bb717 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -25,6 +25,7 @@
25#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 25#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
26#include <linux/of_device.h> 26#include <linux/of_device.h>
27#include <linux/of_graph.h> 27#include <linux/of_graph.h>
28#include <video/of_display_timing.h>
28#include <video/of_videomode.h> 29#include <video/of_videomode.h>
29#include <linux/regmap.h> 30#include <linux/regmap.h>
30#include <linux/videodev2.h> 31#include <linux/videodev2.h>
@@ -59,6 +60,7 @@ struct imx_ldb_channel {
59 struct drm_encoder encoder; 60 struct drm_encoder encoder;
60 struct drm_panel *panel; 61 struct drm_panel *panel;
61 struct device_node *child; 62 struct device_node *child;
63 struct i2c_adapter *ddc;
62 int chno; 64 int chno;
63 void *edid; 65 void *edid;
64 int edid_len; 66 int edid_len;
@@ -107,6 +109,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
107 return num_modes; 109 return num_modes;
108 } 110 }
109 111
112 if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
113 imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
114
110 if (imx_ldb_ch->edid) { 115 if (imx_ldb_ch->edid) {
111 drm_mode_connector_update_edid_property(connector, 116 drm_mode_connector_update_edid_property(connector,
112 imx_ldb_ch->edid); 117 imx_ldb_ch->edid);
@@ -553,7 +558,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
553 558
554 for_each_child_of_node(np, child) { 559 for_each_child_of_node(np, child) {
555 struct imx_ldb_channel *channel; 560 struct imx_ldb_channel *channel;
556 struct device_node *port; 561 struct device_node *ddc_node;
562 struct device_node *ep;
557 563
558 ret = of_property_read_u32(child, "reg", &i); 564 ret = of_property_read_u32(child, "reg", &i);
559 if (ret || i < 0 || i > 1) 565 if (ret || i < 0 || i > 1)
@@ -576,33 +582,54 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
576 * The output port is port@4 with an external 4-port mux or 582 * The output port is port@4 with an external 4-port mux or
577 * port@2 with the internal 2-port mux. 583 * port@2 with the internal 2-port mux.
578 */ 584 */
579 port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2); 585 ep = of_graph_get_endpoint_by_regs(child,
580 if (port) { 586 imx_ldb->lvds_mux ? 4 : 2,
581 struct device_node *endpoint, *remote; 587 -1);
582 588 if (ep) {
583 endpoint = of_get_child_by_name(port, "endpoint"); 589 struct device_node *remote;
584 if (endpoint) { 590
585 remote = of_graph_get_remote_port_parent(endpoint); 591 remote = of_graph_get_remote_port_parent(ep);
586 if (remote) 592 of_node_put(ep);
587 channel->panel = of_drm_find_panel(remote); 593 if (remote)
588 else 594 channel->panel = of_drm_find_panel(remote);
589 return -EPROBE_DEFER; 595 else
590 if (!channel->panel) { 596 return -EPROBE_DEFER;
591 dev_err(dev, "panel not found: %s\n", 597 of_node_put(remote);
592 remote->full_name); 598 if (!channel->panel) {
593 return -EPROBE_DEFER; 599 dev_err(dev, "panel not found: %s\n",
594 } 600 remote->full_name);
601 return -EPROBE_DEFER;
595 } 602 }
596 } 603 }
597 604
598 edidp = of_get_property(child, "edid", &channel->edid_len); 605 ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
599 if (edidp) { 606 if (ddc_node) {
600 channel->edid = kmemdup(edidp, channel->edid_len, 607 channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
601 GFP_KERNEL); 608 of_node_put(ddc_node);
602 } else if (!channel->panel) { 609 if (!channel->ddc) {
603 ret = of_get_drm_display_mode(child, &channel->mode, 0); 610 dev_warn(dev, "failed to get ddc i2c adapter\n");
604 if (!ret) 611 return -EPROBE_DEFER;
605 channel->mode_valid = 1; 612 }
613 }
614
615 if (!channel->ddc) {
616			/* if no DDC available, fall back to hardcoded EDID */
617 dev_dbg(dev, "no ddc available\n");
618
619 edidp = of_get_property(child, "edid",
620 &channel->edid_len);
621 if (edidp) {
622 channel->edid = kmemdup(edidp,
623 channel->edid_len,
624 GFP_KERNEL);
625 } else if (!channel->panel) {
626				/* fall back to display-timings node */
627 ret = of_get_drm_display_mode(child,
628 &channel->mode,
629 OF_USE_NATIVE_MODE);
630 if (!ret)
631 channel->mode_valid = 1;
632 }
606 } 633 }
607 634
608 channel->bus_format = of_get_bus_format(dev, child); 635 channel->bus_format = of_get_bus_format(dev, child);
@@ -647,6 +674,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
647 channel->encoder.funcs->destroy(&channel->encoder); 674 channel->encoder.funcs->destroy(&channel->encoder);
648 675
649 kfree(channel->edid); 676 kfree(channel->edid);
677 i2c_put_adapter(channel->ddc);
650 } 678 }
651} 679}
652 680
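Taken together, the imx-ldb changes above give each LVDS channel a fallback chain for mode data: a panel found via the OF graph, else an EDID read over the new ddc-i2c-bus adapter (done lazily in get_modes()), else the hardcoded edid property, else the display-timings node. Roughly, as plain control flow (all the probe helpers are invented stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the mode sources probed in imx_ldb_bind(). */
static bool have_panel(void)        { return false; }
static bool have_ddc_adapter(void)  { return false; }
static bool have_edid_prop(void)    { return true; }
static bool have_timings_node(void) { return true; }

int main(void)
{
	if (have_panel())
		puts("modes from drm_panel");
	else if (have_ddc_adapter())
		puts("modes from EDID over DDC");	/* read in get_modes() */
	else if (have_edid_prop())
		puts("modes from hardcoded edid property");
	else if (have_timings_node())
		puts("mode from display-timings node");
	else
		puts("no mode source");
	return 0;
}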
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index ae7a9fb3b8a2..baf788121287 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -294,8 +294,10 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
294 294
295 switch (tve->mode) { 295 switch (tve->mode) {
296 case TVE_MODE_VGA: 296 case TVE_MODE_VGA:
297 imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24, 297 imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
298 tve->hsync_pin, tve->vsync_pin); 298 tve->hsync_pin, tve->vsync_pin,
299 DRM_BUS_FLAG_DE_HIGH |
300 DRM_BUS_FLAG_PIXDATA_NEGEDGE);
299 break; 301 break;
300 case TVE_MODE_TVOUT: 302 case TVE_MODE_TVOUT:
301 imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); 303 imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index b2c30b8d9816..fc040417e1e8 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -66,6 +66,7 @@ struct ipu_crtc {
66 struct ipu_flip_work *flip_work; 66 struct ipu_flip_work *flip_work;
67 int irq; 67 int irq;
68 u32 bus_format; 68 u32 bus_format;
69 u32 bus_flags;
69 int di_hsync_pin; 70 int di_hsync_pin;
70 int di_vsync_pin; 71 int di_vsync_pin;
71}; 72};
@@ -271,8 +272,10 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
271 else 272 else
272 sig_cfg.clkflags = 0; 273 sig_cfg.clkflags = 0;
273 274
274 sig_cfg.enable_pol = 1; 275 sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
275 sig_cfg.clk_pol = 0; 276 /* Default to driving pixel data on negative clock edges */
277 sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
278 DRM_BUS_FLAG_PIXDATA_POSEDGE);
276 sig_cfg.bus_format = ipu_crtc->bus_format; 279 sig_cfg.bus_format = ipu_crtc->bus_format;
277 sig_cfg.v_to_h_sync = 0; 280 sig_cfg.v_to_h_sync = 0;
278 sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; 281 sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
@@ -396,11 +399,12 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
396} 399}
397 400
398static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, 401static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
399 u32 bus_format, int hsync_pin, int vsync_pin) 402 u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
400{ 403{
401 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 404 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
402 405
403 ipu_crtc->bus_format = bus_format; 406 ipu_crtc->bus_format = bus_format;
407 ipu_crtc->bus_flags = bus_flags;
404 ipu_crtc->di_hsync_pin = hsync_pin; 408 ipu_crtc->di_hsync_pin = hsync_pin;
405 ipu_crtc->di_vsync_pin = vsync_pin; 409 ipu_crtc->di_vsync_pin = vsync_pin;
406 410
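ipu_crtc_mode_set() above now derives the DI signal polarities from the bus_flags handed through imx_drm_set_bus_config() instead of hardcoding them: data-enable stays active-high unless DRM_BUS_FLAG_DE_LOW is set, and the pixel clock keeps driving on negative edges unless DRM_BUS_FLAG_PIXDATA_POSEDGE asks otherwise. The mapping in isolation, with illustrative flag values:

#include <stdio.h>

/* Illustrative flag bits -- the real DRM_BUS_FLAG_* values differ. */
#define BUS_FLAG_DE_LOW			(1u << 0)
#define BUS_FLAG_PIXDATA_POSEDGE	(1u << 1)

int main(void)
{
	unsigned int bus_flags = BUS_FLAG_PIXDATA_POSEDGE;

	/* DE is active-high unless the endpoint asked for active-low. */
	int enable_pol = !(bus_flags & BUS_FLAG_DE_LOW);
	/* Default to driving pixel data on negative clock edges. */
	int clk_pol = !!(bus_flags & BUS_FLAG_PIXDATA_POSEDGE);

	printf("enable_pol=%d clk_pol=%d\n", enable_pol, clk_pol);
	return 0;
}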
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 681ec6eb77d9..a4bb44118d33 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -38,6 +38,8 @@ static const uint32_t ipu_plane_formats[] = {
38 DRM_FORMAT_RGBX8888, 38 DRM_FORMAT_RGBX8888,
39 DRM_FORMAT_BGRA8888, 39 DRM_FORMAT_BGRA8888,
40 DRM_FORMAT_BGRA8888, 40 DRM_FORMAT_BGRA8888,
41 DRM_FORMAT_UYVY,
42 DRM_FORMAT_VYUY,
41 DRM_FORMAT_YUYV, 43 DRM_FORMAT_YUYV,
42 DRM_FORMAT_YVYU, 44 DRM_FORMAT_YVYU,
43 DRM_FORMAT_YUV420, 45 DRM_FORMAT_YUV420,
@@ -428,7 +430,6 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
428 if (crtc != plane->crtc) 430 if (crtc != plane->crtc)
429 dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", 431 dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
430 plane->crtc, crtc); 432 plane->crtc, crtc);
431 plane->crtc = crtc;
432 433
433 if (!ipu_plane->enabled) 434 if (!ipu_plane->enabled)
434 ipu_plane_enable(ipu_plane); 435 ipu_plane_enable(ipu_plane);
@@ -461,7 +462,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
461 kfree(ipu_plane); 462 kfree(ipu_plane);
462} 463}
463 464
464static struct drm_plane_funcs ipu_plane_funcs = { 465static const struct drm_plane_funcs ipu_plane_funcs = {
465 .update_plane = ipu_update_plane, 466 .update_plane = ipu_update_plane,
466 .disable_plane = ipu_disable_plane, 467 .disable_plane = ipu_disable_plane,
467 .destroy = ipu_plane_destroy, 468 .destroy = ipu_plane_destroy,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 363e2c7741e2..2d1fd02cd3d6 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -35,7 +35,6 @@ struct imx_parallel_display {
35 void *edid; 35 void *edid;
36 int edid_len; 36 int edid_len;
37 u32 bus_format; 37 u32 bus_format;
38 int mode_valid;
39 struct drm_display_mode mode; 38 struct drm_display_mode mode;
40 struct drm_panel *panel; 39 struct drm_panel *panel;
41}; 40};
@@ -68,17 +67,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
68 num_modes = drm_add_edid_modes(connector, imxpd->edid); 67 num_modes = drm_add_edid_modes(connector, imxpd->edid);
69 } 68 }
70 69
71 if (imxpd->mode_valid) {
72 struct drm_display_mode *mode = drm_mode_create(connector->dev);
73
74 if (!mode)
75 return -EINVAL;
76 drm_mode_copy(mode, &imxpd->mode);
77 mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
78 drm_mode_probed_add(connector, mode);
79 num_modes++;
80 }
81
82 if (np) { 70 if (np) {
83 struct drm_display_mode *mode = drm_mode_create(connector->dev); 71 struct drm_display_mode *mode = drm_mode_create(connector->dev);
84 72
@@ -115,8 +103,8 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
115static void imx_pd_encoder_prepare(struct drm_encoder *encoder) 103static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
116{ 104{
117 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); 105 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
118 106 imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
119 imx_drm_set_bus_format(encoder, imxpd->bus_format); 107 imxpd->connector.display_info.bus_flags);
120} 108}
121 109
122static void imx_pd_encoder_commit(struct drm_encoder *encoder) 110static void imx_pd_encoder_commit(struct drm_encoder *encoder)
@@ -203,7 +191,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
203{ 191{
204 struct drm_device *drm = data; 192 struct drm_device *drm = data;
205 struct device_node *np = dev->of_node; 193 struct device_node *np = dev->of_node;
206 struct device_node *port; 194 struct device_node *ep;
207 const u8 *edidp; 195 const u8 *edidp;
208 struct imx_parallel_display *imxpd; 196 struct imx_parallel_display *imxpd;
209 int ret; 197 int ret;
@@ -230,18 +218,18 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
230 } 218 }
231 219
232 /* port@1 is the output port */ 220 /* port@1 is the output port */
233 port = of_graph_get_port_by_id(np, 1); 221 ep = of_graph_get_endpoint_by_regs(np, 1, -1);
234 if (port) { 222 if (ep) {
235 struct device_node *endpoint, *remote; 223 struct device_node *remote;
236 224
237 endpoint = of_get_child_by_name(port, "endpoint"); 225 remote = of_graph_get_remote_port_parent(ep);
238 if (endpoint) { 226 of_node_put(ep);
239 remote = of_graph_get_remote_port_parent(endpoint); 227 if (remote) {
240 if (remote) 228 imxpd->panel = of_drm_find_panel(remote);
241 imxpd->panel = of_drm_find_panel(remote); 229 of_node_put(remote);
242 if (!imxpd->panel)
243 return -EPROBE_DEFER;
244 } 230 }
231 if (!imxpd->panel)
232 return -EPROBE_DEFER;
245 } 233 }
246 234
247 imxpd->dev = dev; 235 imxpd->dev = dev;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index d05ca7901315..0186e500d2a5 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -432,11 +432,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
432 unsigned long pll_rate; 432 unsigned long pll_rate;
433 unsigned int factor; 433 unsigned int factor;
434 434
435 if (!dpi) {
436 dev_err(dpi->dev, "invalid argument\n");
437 return -EINVAL;
438 }
439
440 pix_rate = 1000UL * mode->clock; 435 pix_rate = 1000UL * mode->clock;
441 if (mode->clock <= 74000) 436 if (mode->clock <= 74000)
442 factor = 8 * 3; 437 factor = 8 * 3;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b1223d54d0ab..c33bf98c5d5e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -91,7 +91,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
91 mutex_lock(&private->commit.lock); 91 mutex_lock(&private->commit.lock);
92 flush_work(&private->commit.work); 92 flush_work(&private->commit.work);
93 93
94 drm_atomic_helper_swap_state(drm, state); 94 drm_atomic_helper_swap_state(state, true);
95 95
96 if (async) 96 if (async)
97 mtk_atomic_schedule(private, state); 97 mtk_atomic_schedule(private, state);
@@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = {
243 .enable_vblank = mtk_drm_crtc_enable_vblank, 243 .enable_vblank = mtk_drm_crtc_enable_vblank,
244 .disable_vblank = mtk_drm_crtc_disable_vblank, 244 .disable_vblank = mtk_drm_crtc_disable_vblank,
245 245
246 .gem_free_object = mtk_drm_gem_free_object, 246 .gem_free_object_unlocked = mtk_drm_gem_free_object,
247 .gem_vm_ops = &drm_gem_cma_vm_ops, 247 .gem_vm_ops = &drm_gem_cma_vm_ops,
248 .dumb_create = mtk_drm_gem_dumb_create, 248 .dumb_create = mtk_drm_gem_dumb_create,
249 .dumb_map_offset = mtk_drm_gem_dumb_map_offset, 249 .dumb_map_offset = mtk_drm_gem_dumb_map_offset,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 2d808e59fefd..28b2044ed9f2 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -575,14 +575,6 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
575 return drm_panel_get_modes(dsi->panel); 575 return drm_panel_get_modes(dsi->panel);
576} 576}
577 577
578static struct drm_encoder *mtk_dsi_connector_best_encoder(
579 struct drm_connector *connector)
580{
581 struct mtk_dsi *dsi = connector_to_dsi(connector);
582
583 return &dsi->encoder;
584}
585
586static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = { 578static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
587 .mode_fixup = mtk_dsi_encoder_mode_fixup, 579 .mode_fixup = mtk_dsi_encoder_mode_fixup,
588 .mode_set = mtk_dsi_encoder_mode_set, 580 .mode_set = mtk_dsi_encoder_mode_set,
@@ -603,7 +595,6 @@ static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
603static const struct drm_connector_helper_funcs 595static const struct drm_connector_helper_funcs
604 mtk_dsi_connector_helper_funcs = { 596 mtk_dsi_connector_helper_funcs = {
605 .get_modes = mtk_dsi_connector_get_modes, 597 .get_modes = mtk_dsi_connector_get_modes,
606 .best_encoder = mtk_dsi_connector_best_encoder,
607}; 598};
608 599
609static int mtk_drm_attach_bridge(struct drm_bridge *bridge, 600static int mtk_drm_attach_bridge(struct drm_bridge *bridge,
@@ -695,10 +686,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
 {
 	drm_encoder_cleanup(&dsi->encoder);
 	/* Skip connector cleanup if creation was delegated to the bridge */
-	if (dsi->conn.dev) {
-		drm_connector_unregister(&dsi->conn);
+	if (dsi->conn.dev)
 		drm_connector_cleanup(&dsi->conn);
-	}
 }
 
 static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index ebb470ff7200..2b4b125eebc3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -101,7 +101,7 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_free_object = mgag200_gem_free_object,
+	.gem_free_object_unlocked = mgag200_gem_free_object,
 	.dumb_create = mgag200_dumb_create,
 	.dumb_map_offset = mgag200_dumb_mmap_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 14e64e08909e..6b21cb27e1cc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 		}
 	}
 
-	fvv = pllreffreq * testn / testm;
+	fvv = pllreffreq * (n + 1) / (m + 1);
 	fvv = (fvv - 800000) / 50000;
 
 	if (fvv > 15)
@@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 	WREG_DAC(MGA1064_PIX_PLLC_M, m);
 	WREG_DAC(MGA1064_PIX_PLLC_N, n);
 	WREG_DAC(MGA1064_PIX_PLLC_P, p);
+
+	if (mdev->unique_rev_id >= 0x04) {
+		WREG_DAC(0x1a, 0x09);
+		msleep(20);
+		WREG_DAC(0x1a, 0x01);
+
+	}
+
 	return 0;
 }
 
@@ -1344,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
  * use this for 8-bit mode so can't perform smooth fades on deeper modes,
  * but it's a requirement that we provide the function
  */
-static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-			       u16 *blue, uint32_t start, uint32_t size)
+static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, uint32_t size)
 {
 	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
-	int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
 	int i;
 
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		mga_crtc->lut_r[i] = red[i] >> 8;
 		mga_crtc->lut_g[i] = green[i] >> 8;
 		mga_crtc->lut_b[i] = blue[i] >> 8;
 	}
 	mga_crtc_load_lut(crtc);
+
+	return 0;
 }
 
 /* Simple cleanup function */
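This .gamma_set prototype change recurs in the nouveau hunks below: the hook now always loads the full table from index 0 (the start parameter is gone) and returns an int so failures can propagate to the ioctl. The converted callbacks all reduce to this shape (sketch; lut is a hypothetical per-CRTC array):

	static int example_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				     u16 *blue, uint32_t size)
	{
		uint32_t i;

		for (i = 0; i < size; i++) {
			/* 8-bit hardware LUT, hence the >> 8 truncation */
			lut[i].r = red[i] >> 8;
			lut[i].g = green[i] >> 8;
			lut[i].b = blue[i] >> 8;
		}
		/* ...program the hardware here... */
		return 0;	/* report errors instead of dropping them */
	}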
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 72360cd038c0..5960628ceb93 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static struct drm_encoder *
-edp_connector_best_encoder(struct drm_connector *connector)
-{
-	struct edp_connector *edp_connector = to_edp_connector(connector);
-
-	DBG("");
-	return edp_connector->edp->encoder;
-}
-
 static const struct drm_connector_funcs edp_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = edp_connector_detect,
@@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = {
 static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
 	.get_modes = edp_connector_get_modes,
 	.mode_valid = edp_connector_mode_valid,
-	.best_encoder = edp_connector_best_encoder,
 };
 
 /* initialize connector */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index b15d72683112..a2515b466ce5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
 	return 0;
 }
 
-static struct drm_encoder *
-msm_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
-	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-	return hdmi_connector->hdmi->encoder;
-}
-
 static const struct drm_connector_funcs hdmi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = hdmi_connector_detect,
@@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
 static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
 	.get_modes = msm_hdmi_connector_get_modes,
 	.mode_valid = msm_hdmi_connector_mode_valid,
-	.best_encoder = msm_hdmi_connector_best_encoder,
 };
 
 /* initialize connector */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 67442d50a6c2..f145d256e332 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -106,31 +106,27 @@ out:
 static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-	int i, ncrtcs = state->dev->mode_config.num_crtc;
+	int i;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
 
 	mdp4_enable(mdp4_kms);
 
 	/* see 119ecb7fd */
-	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc)
-			continue;
+	for_each_crtc_in_state(state, crtc, crtc_state, i)
 		drm_crtc_vblank_get(crtc);
-	}
 }
 
 static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-	int i, ncrtcs = state->dev->mode_config.num_crtc;
+	int i;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
 
 	/* see 119ecb7fd */
-	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc)
-			continue;
+	for_each_crtc_in_state(state, crtc, crtc_state, i)
 		drm_crtc_vblank_put(crtc);
-	}
 
 	mdp4_disable(mdp4_kms);
 }
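for_each_crtc_in_state() (and its plane counterpart used below) walks only the objects actually tracked in the atomic state and hands back both the object and its state, so the open-coded NULL-slot skipping disappears. The macro is roughly equivalent to this loop (sketch, not the verbatim macro body):

	for (i = 0; i < state->dev->mode_config.num_crtc; i++) {
		crtc = state->crtcs[i];
		crtc_state = state->crtc_states[i];
		if (!crtc)
			continue;	/* slot not part of this commit */
		/* loop body runs here */
	}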
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 2648cd7631ef..353429b05733 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static struct drm_encoder *
-mdp4_lvds_connector_best_encoder(struct drm_connector *connector)
-{
-	struct mdp4_lvds_connector *mdp4_lvds_connector =
-			to_mdp4_lvds_connector(connector);
-	return mdp4_lvds_connector->encoder;
-}
-
 static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = mdp4_lvds_connector_detect,
@@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
 static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
 	.get_modes = mdp4_lvds_connector_get_modes,
 	.mode_valid = mdp4_lvds_connector_mode_valid,
-	.best_encoder = mdp4_lvds_connector_best_encoder,
 };
 
 /* initialize connector */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 88fe256c1931..4e8ed739f558 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct plane_state pstates[STAGE_MAX + 1];
 	const struct mdp5_cfg_hw *hw_cfg;
+	const struct drm_plane_state *pstate;
 	int cnt = 0, i;
 
 	DBG("%s: check", mdp5_crtc->name);
@@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	 * and that we don't have conflicting mixer stages:
 	 */
 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-	drm_atomic_crtc_state_for_each_plane(plane, state) {
-		struct drm_plane_state *pstate;
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
 		if (cnt >= (hw_cfg->lm.nb_stages)) {
 			dev_err(dev->dev, "too many planes!\n");
 			return -EINVAL;
 		}
 
-		pstate = state->state->plane_states[drm_plane_index(plane)];
 
-		/* plane might not have changed, in which case take
-		 * current state:
-		 */
-		if (!pstate)
-			pstate = plane->state;
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 484b4d15e71d..f0c285b1c027 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -78,17 +78,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
 {
 	int i;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
-
-	for (i = 0; i < nplanes; i++) {
-		struct drm_plane *plane = state->planes[i];
-		struct drm_plane_state *plane_state = state->plane_states[i];
-
-		if (!plane)
-			continue;
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
 
+	for_each_plane_in_state(state, plane, plane_state, i)
 		mdp5_plane_complete_commit(plane, plane_state);
-	}
 
 	mdp5_disable(mdp5_kms);
 }
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e3892c263f27..4a8a6f1f1151 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 		struct drm_atomic_state *old_state)
 {
 	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
 	struct msm_drm_private *priv = old_state->dev->dev_private;
 	struct msm_kms *kms = priv->kms;
-	int ncrtcs = old_state->dev->mode_config.num_crtc;
 	int i;
 
-	for (i = 0; i < ncrtcs; i++) {
-		crtc = old_state->crtcs[i];
-
-		if (!crtc)
-			continue;
-
+	for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
 		if (!crtc->state->enable)
 			continue;
 
@@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool nonblock)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	int nplanes = dev->mode_config.num_total_plane;
-	int ncrtcs = dev->mode_config.num_crtc;
 	struct msm_commit *c;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
 	int i, ret;
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev,
 	/*
 	 * Figure out what crtcs we have:
 	 */
-	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc *crtc = state->crtcs[i];
-		if (!crtc)
-			continue;
-		c->crtc_mask |= (1 << drm_crtc_index(crtc));
-	}
+	for_each_crtc_in_state(state, crtc, crtc_state, i)
+		c->crtc_mask |= drm_crtc_mask(crtc);
 
 	/*
 	 * Figure out what fence to wait for:
 	 */
-	for (i = 0; i < nplanes; i++) {
-		struct drm_plane *plane = state->planes[i];
-		struct drm_plane_state *new_state = state->plane_states[i];
-
-		if (!plane)
-			continue;
-
-		if ((plane->state->fb != new_state->fb) && new_state->fb) {
-			struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+	for_each_plane_in_state(state, plane, plane_state, i) {
+		if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+			struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
 			struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-			new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+			plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
 		}
 	}
 
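Stashing the exclusive fence from the BO's reservation object is what makes the commit wait for the last writer (for instance the GPU finishing a render) before the new framebuffer is scanned out. Later, the commit machinery can block on the stashed fence along these lines (sketch; error handling omitted):

	if (plane_state->fence) {
		fence_wait(plane_state->fence, false);	/* false = uninterruptible */
		fence_put(plane_state->fence);
		plane_state->fence = NULL;
	}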
@@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev,
 	 * the software side now.
 	 */
 
-	drm_atomic_helper_swap_state(dev, state);
+	drm_atomic_helper_swap_state(state, true);
 
 	/*
 	 * Everything below can be run asynchronously without the need to grab
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 461dc8b873f0..7919c24c6ddd 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -56,17 +56,9 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
 	kfree(msm_fb);
 }
 
-static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
-		struct drm_file *file_priv, unsigned flags, unsigned color,
-		struct drm_clip_rect *clips, unsigned num_clips)
-{
-	return 0;
-}
-
 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
 	.create_handle = msm_framebuffer_create_handle,
 	.destroy = msm_framebuffer_destroy,
-	.dirty = msm_framebuffer_dirty,
 };
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index d9759bf3482e..1a061e3e8b9e 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -184,21 +184,7 @@ fail:
 	return ret;
 }
 
-static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
-		u16 red, u16 green, u16 blue, int regno)
-{
-	DBG("fbdev: set gamma");
-}
-
-static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
-		u16 *red, u16 *green, u16 *blue, int regno)
-{
-	DBG("fbdev: get gamma");
-}
-
 static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
-	.gamma_set = msm_crtc_fb_gamma_set,
-	.gamma_get = msm_crtc_fb_gamma_get,
 	.fb_probe = msm_fbdev_create,
 };
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6f318c54da33..0cb7a18cde26 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
 	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
 }
 
-static void
-nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
+static int
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
 		  uint32_t size)
 {
-	int end = (start + size > 256) ? 256 : start + size, i;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int i;
 
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		nv_crtc->lut.r[i] = r[i];
 		nv_crtc->lut.g[i] = g[i];
 		nv_crtc->lut.b[i] = b[i];
@@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
 	 */
 	if (!nv_crtc->base.primary->fb) {
 		nv_crtc->lut.depth = 0;
-		return;
+		return 0;
 	}
 
 	nv_crtc_gamma_load(crtc);
+
+	return 0;
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7c77f960c8b8..6072fe292db8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -760,12 +760,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	/* Initialize a page flip struct */
 	*s = (struct nouveau_page_flip_state)
-		{ { }, event, nouveau_crtc(crtc)->index,
-		  fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
+		{ { }, event, crtc, fb->bits_per_pixel, fb->pitches[0],
 		  new_bo->bo.offset };
 
 	/* Keep vblanks on during flip, for the target crtc of this flip */
-	drm_vblank_get(dev, nouveau_crtc(crtc)->index);
+	drm_crtc_vblank_get(crtc);
 
 	/* Emit a page flip */
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
@@ -810,7 +809,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	return 0;
 
 fail_unreserve:
-	drm_vblank_put(dev, nouveau_crtc(crtc)->index);
+	drm_crtc_vblank_put(crtc);
 	ttm_bo_unreserve(&old_bo->bo);
 fail_unpin:
 	mutex_unlock(&cli->mutex);
@@ -842,17 +841,17 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
 	if (s->event) {
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-			drm_arm_vblank_event(dev, s->crtc, s->event);
+			drm_crtc_arm_vblank_event(s->crtc, s->event);
 		} else {
-			drm_send_vblank_event(dev, s->crtc, s->event);
+			drm_crtc_send_vblank_event(s->crtc, s->event);
 
 			/* Give up ownership of vblank for page-flipped crtc */
-			drm_vblank_put(dev, s->crtc);
+			drm_crtc_vblank_put(s->crtc);
 		}
 	}
 	else {
 		/* Give up ownership of vblank for page-flipped crtc */
-		drm_vblank_put(dev, s->crtc);
+		drm_crtc_vblank_put(s->crtc);
 	}
 
 	list_del(&s->head);
@@ -873,9 +872,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
 
 	if (!nouveau_finish_page_flip(chan, &state)) {
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-			nv_set_crtc_base(drm->dev, state.crtc, state.offset +
-					 state.y * state.pitch +
-					 state.x * state.bpp / 8);
+			nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+					 state.offset + state.crtc->y *
+					 state.pitch + state.crtc->x *
+					 state.bpp / 8);
 		}
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 24273bacd885..0420ee861ea4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -28,7 +28,8 @@ int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
 struct nouveau_page_flip_state {
 	struct list_head head;
 	struct drm_pending_vblank_event *event;
-	int crtc, bpp, pitch, x, y;
+	struct drm_crtc *crtc;
+	int bpp, pitch;
 	u64 offset;
 };
 
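Storing the struct drm_crtc pointer itself, instead of an index plus cached x/y offsets, lets the completion path recover everything on demand and use the crtc-based vblank API seen in the hunks above. The old fields map over as (sketch):

	int index = drm_crtc_index(state.crtc);	/* was: state.crtc */
	int x = state.crtc->x;			/* was: state.x */
	int y = state.crtc->y;			/* was: state.y */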
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 11f8dd9c0edb..c00ff6e786a3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,13 +22,11 @@
  * Authors: Ben Skeggs
  */
 
-#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 
 #include "drmP.h"
@@ -315,13 +313,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 	bool boot = false;
 	int ret;
 
-	/*
-	 * apple-gmux is needed on dual GPU MacBook Pro
-	 * to probe the panel if we're the inactive GPU.
-	 */
-	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
-	    apple_gmux_present() && pdev != vga_default_device() &&
-	    !vga_switcheroo_handler_flags())
+	if (vga_switcheroo_client_probe_defer(pdev))
 		return -EPROBE_DEFER;
 
 	/* remove conflicting drivers (vesafb, efifb etc) */
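vga_switcheroo_client_probe_defer() centralizes the dual-GPU probe-defer policy that was open-coded here. For display-class devices its check is roughly the condition this hunk deletes (sketch of the logic, not the helper's verbatim source):

	bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
	{
		if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
			/* apple-gmux is needed on dual-GPU MacBook Pros to
			 * probe the panel if we're the inactive GPU */
			if (apple_gmux_present() && pdev != vga_default_device() &&
			    !vga_switcheroo_handler_flags())
				return true;
		}
		return false;
	}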
@@ -970,7 +962,7 @@ driver_stub = {
 	.gem_prime_vmap = nouveau_gem_prime_vmap,
 	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
 
-	.gem_free_object = nouveau_gem_object_del,
+	.gem_free_object_unlocked = nouveau_gem_object_del,
 	.gem_open_object = nouveau_gem_object_open,
 	.gem_close_object = nouveau_gem_object_close,
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2e3a62d38fe9..64c4ce7115ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
 	int  (*context_new)(struct nouveau_channel *);
 	void (*context_del)(struct nouveau_channel *);
 
-	u32 contexts, context_base;
+	u32 contexts;
+	u64 context_base;
 	bool uevent;
 };
 
63 64
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 675e9e077a95..08f9c6fa0f7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
 	ntfy->p->base.event = &ntfy->p->e.base;
 	ntfy->p->base.file_priv = f;
 	ntfy->p->base.pid = current->pid;
-	ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree;
 	ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
 	ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
 
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 3ffc2b0057bf..7a7788212df7 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1346,21 +1346,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	return 0;
 }
 
-static void
+static int
 nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-		    uint32_t start, uint32_t size)
+		    uint32_t size)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	u32 end = min_t(u32, start + size, 256);
 	u32 i;
 
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		nv_crtc->lut.r[i] = r[i];
 		nv_crtc->lut.g[i] = g[i];
 		nv_crtc->lut.b[i] = b[i];
 	}
 
 	nv50_crtc_lut_load(crtc);
+
+	return 0;
 }
 
 static void
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 73241c4eb7aa..336ad4de9981 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -2,6 +2,7 @@ config DRM_OMAP
 	tristate "OMAP DRM"
 	depends on DRM
 	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+	select OMAP2_DSS
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index 2a618afe0f53..c226da145fb3 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,80 +1,80 @@
 menu "OMAPDRM External Display Device Drivers"
 
-config DISPLAY_ENCODER_OPA362
+config DRM_OMAP_ENCODER_OPA362
 	tristate "OPA362 external analog amplifier"
 	help
 	  Driver for OPA362 external analog TV amplifier controlled
 	  through a GPIO.
 
-config DISPLAY_ENCODER_TFP410
+config DRM_OMAP_ENCODER_TFP410
 	tristate "TFP410 DPI to DVI Encoder"
 	help
 	  Driver for TFP410 DPI to DVI encoder.
 
-config DISPLAY_ENCODER_TPD12S015
+config DRM_OMAP_ENCODER_TPD12S015
 	tristate "TPD12S015 HDMI ESD protection and level shifter"
 	help
 	  Driver for TPD12S015, which offers HDMI ESD protection and level
 	  shifting.
 
-config DISPLAY_CONNECTOR_DVI
+config DRM_OMAP_CONNECTOR_DVI
 	tristate "DVI Connector"
 	depends on I2C
 	help
 	  Driver for a generic DVI connector.
 
-config DISPLAY_CONNECTOR_HDMI
+config DRM_OMAP_CONNECTOR_HDMI
 	tristate "HDMI Connector"
 	help
 	  Driver for a generic HDMI connector.
 
-config DISPLAY_CONNECTOR_ANALOG_TV
+config DRM_OMAP_CONNECTOR_ANALOG_TV
 	tristate "Analog TV Connector"
 	help
 	  Driver for a generic analog TV connector.
 
-config DISPLAY_PANEL_DPI
+config DRM_OMAP_PANEL_DPI
 	tristate "Generic DPI panel"
 	help
 	  Driver for generic DPI panels.
 
-config DISPLAY_PANEL_DSI_CM
+config DRM_OMAP_PANEL_DSI_CM
 	tristate "Generic DSI Command Mode Panel"
 	depends on BACKLIGHT_CLASS_DEVICE
 	help
 	  Driver for generic DSI command mode panels.
 
-config DISPLAY_PANEL_SONY_ACX565AKM
+config DRM_OMAP_PANEL_SONY_ACX565AKM
 	tristate "ACX565AKM Panel"
 	depends on SPI && BACKLIGHT_CLASS_DEVICE
 	help
 	  This is the LCD panel used on Nokia N900
 
-config DISPLAY_PANEL_LGPHILIPS_LB035Q02
+config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02
 	tristate "LG.Philips LB035Q02 LCD Panel"
 	depends on SPI
 	help
 	  LCD Panel used on the Gumstix Overo Palo35
 
-config DISPLAY_PANEL_SHARP_LS037V7DW01
+config DRM_OMAP_PANEL_SHARP_LS037V7DW01
 	tristate "Sharp LS037V7DW01 LCD Panel"
 	depends on BACKLIGHT_CLASS_DEVICE
 	help
 	  LCD Panel used in TI's SDP3430 and EVM boards
 
-config DISPLAY_PANEL_TPO_TD028TTEC1
+config DRM_OMAP_PANEL_TPO_TD028TTEC1
 	tristate "TPO TD028TTEC1 LCD Panel"
 	depends on SPI
 	help
 	  LCD panel used in Openmoko.
 
-config DISPLAY_PANEL_TPO_TD043MTEA1
+config DRM_OMAP_PANEL_TPO_TD043MTEA1
 	tristate "TPO TD043MTEA1 LCD Panel"
 	depends on SPI
 	help
 	  LCD Panel used in OMAP3 Pandora
 
-config DISPLAY_PANEL_NEC_NL8048HL11
+config DRM_OMAP_PANEL_NEC_NL8048HL11
 	tristate "NEC NL8048HL11 Panel"
 	depends on SPI
 	depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 9aa176bfbf2e..46baafb1a83e 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,14 +1,14 @@
-obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o
-obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
-obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o
-obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o
-obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
-obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
-obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
-obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
-obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
+obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o
+obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
+obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
+obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
+obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
+obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 8511c648a15c..3485d1ecd655 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -14,9 +14,10 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 
+#include "../dss/omapdss.h"
+
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
 	struct omap_dss_device *in;
@@ -25,7 +26,6 @@ struct panel_drv_data {
 
 	struct omap_video_timings timings;
 
-	enum omap_dss_venc_type connector_type;
 	bool invert_polarity;
 };
 
@@ -45,10 +45,6 @@ static const struct omap_video_timings tvc_pal_timings = {
 
 static const struct of_device_id tvc_of_match[];
 
-struct tvc_of_data {
-	enum omap_dss_venc_type connector_type;
-};
-
 #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
 
54static int tvc_connect(struct omap_dss_device *dssdev) 50static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +95,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
 	in->ops.atv->set_timings(in, &ddata->timings);
 
 	if (!ddata->dev->of_node) {
-		in->ops.atv->set_type(in, ddata->connector_type);
+		in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
 
 		in->ops.atv->invert_vid_out_polarity(in,
 			ddata->invert_polarity);
@@ -207,7 +203,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
 
 	ddata->in = in;
 
-	ddata->connector_type = pdata->connector_type;
 	ddata->invert_polarity = pdata->invert_polarity;
 
 	dssdev = &ddata->dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 747f26a55e43..75f7827525cf 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -15,10 +15,10 @@
 #include <linux/slab.h>
 
 #include <drm/drm_edid.h>
-
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 
+#include "../dss/omapdss.h"
+
 static const struct omap_video_timings dvic_default_timings = {
 	.x_res		= 640,
 	.y_res		= 480,
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 225fd8d6ab31..7bdf83af9797 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  */
 
+#include <linux/gpio/consumer.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -16,10 +17,10 @@
 #include <linux/of_gpio.h>
 
 #include <drm/drm_edid.h>
-
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 
+#include "../dss/omapdss.h"
+
 static const struct omap_video_timings hdmic_default_timings = {
 	.x_res		= 640,
 	.y_res		= 480,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 8c246c213e06..fe4e7ec3bab0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -14,13 +14,12 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 2fd5602880a7..d768217cefe0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -9,14 +9,13 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 916a89978387..46855c8f5cbf 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/gpio/consumer.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index e780fd4f8b46..7f16f985ab22 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -9,17 +9,19 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
 
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 #include <video/of_display_timing.h>
 
+#include "../dss/omapdss.h"
+
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
 	struct omap_dss_device *in;
@@ -32,6 +34,7 @@ struct panel_drv_data {
 	int backlight_gpio;
 
 	struct gpio_desc *enable_gpio;
+	struct regulator *vcc_supply;
 };
 
 #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -83,6 +86,12 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
 	if (r)
 		return r;
 
+	r = regulator_enable(ddata->vcc_supply);
+	if (r) {
+		in->ops.dpi->disable(in);
+		return r;
+	}
+
 	gpiod_set_value_cansleep(ddata->enable_gpio, 1);
 
 	if (gpio_is_valid(ddata->backlight_gpio))
@@ -105,6 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
 		gpio_set_value_cansleep(ddata->backlight_gpio, 0);
 
 	gpiod_set_value_cansleep(ddata->enable_gpio, 0);
+	regulator_disable(ddata->vcc_supply);
 
 	in->ops.dpi->disable(in);
 
@@ -213,6 +223,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
 
 	ddata->enable_gpio = gpio;
 
+	/*
+	 * Many different panels are supported by this driver and there are
+	 * probably very different needs for their reset pins in regards to
+	 * timing and order relative to the enable gpio. So for now it's just
+	 * ensured that the reset line isn't active.
+	 */
+	gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(gpio))
+		return PTR_ERR(gpio);
+
+	ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
+	if (IS_ERR(ddata->vcc_supply))
+		return PTR_ERR(ddata->vcc_supply);
+
 	ddata->backlight_gpio = -ENOENT;
 
 	r = of_get_display_timing(node, "panel-timing", &timing);
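Because devm_regulator_get() hands back a dummy regulator when the device tree names no "vcc" supply, the enable/disable calls added above can stay unconditional and still balance. The pattern, pulled together (sketch):

	ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
	if (IS_ERR(ddata->vcc_supply))
		return PTR_ERR(ddata->vcc_supply);	/* e.g. -EPROBE_DEFER */

	/* enable path: power the panel after the DPI source is up */
	r = regulator_enable(ddata->vcc_supply);

	/* disable path must mirror it with regulator_disable() */
	regulator_disable(ddata->vcc_supply);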
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 36485c2137ce..1b0cf2d8224b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -14,7 +14,7 @@
 #include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
@@ -25,10 +25,10 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
 #include <video/mipi_display.h>
 
+#include "../dss/omapdss.h"
+
 /* DSI Virtual channel. Hardcoded for now. */
 #define TCH 0
 
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 458f77bc473d..6dfb96cea293 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -15,9 +15,9 @@
 #include <linux/spi/spi.h>
 #include <linux/mutex.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
 
 static struct omap_video_timings lb035q02_timings = {
 	.x_res = 320,
@@ -50,9 +50,6 @@ struct panel_drv_data {
 
 	struct omap_video_timings videomode;
 
-	/* used for non-DT boot, to be removed */
-	int backlight_gpio;
-
 	struct gpio_desc *enable_gpio;
 };
 
@@ -170,9 +167,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
 	if (ddata->enable_gpio)
 		gpiod_set_value_cansleep(ddata->enable_gpio, 1);
 
-	if (gpio_is_valid(ddata->backlight_gpio))
-		gpio_set_value_cansleep(ddata->backlight_gpio, 1);
-
 	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
 	return 0;
@@ -189,9 +183,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
 	if (ddata->enable_gpio)
 		gpiod_set_value_cansleep(ddata->enable_gpio, 0);
 
-	if (gpio_is_valid(ddata->backlight_gpio))
-		gpio_set_value_cansleep(ddata->backlight_gpio, 0);
-
 	in->ops.dpi->disable(in);
 
 	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -255,8 +246,6 @@ static int lb035q02_probe_of(struct spi_device *spi)
 
 	ddata->enable_gpio = gpio;
 
-	ddata->backlight_gpio = -ENOENT;
-
 	in = omapdss_of_find_source_for_first_ep(node);
 	if (IS_ERR(in)) {
 		dev_err(&spi->dev, "failed to find video source\n");
@@ -289,13 +278,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
 	if (r)
 		return r;
 
-	if (gpio_is_valid(ddata->backlight_gpio)) {
-		r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
-				GPIOF_OUT_INIT_LOW, "panel backlight");
-		if (r)
-			goto err_gpio;
-	}
-
 	ddata->videomode = lb035q02_timings;
 
 	dssdev = &ddata->dssdev;
@@ -315,7 +297,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
 	return 0;
 
 err_reg:
-err_gpio:
 	omap_dss_put_device(ddata->in);
 	return r;
 }
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 780cb263a318..fc4c238c9583 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -15,10 +15,10 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 529a017602e4..3d3efc561ea9 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -10,14 +10,14 @@
  */
 
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
+
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 31efcca801bd..157c512205d1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -29,13 +29,14 @@
 #include <linux/sched.h>
 #include <linux/backlight.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 
+#include "../dss/omapdss.h"
+
 #define MIPID_CMD_READ_DISP_ID		0x04
 #define MIPID_CMD_READ_RED		0x06
 #define MIPID_CMD_READ_GREEN		0x07
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index bd8d85041926..e859b3f893f7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -28,7 +28,8 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
-#include <video/omapdss.h>
+
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 03e2beb7b4f0..66c6bbe6472b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -14,12 +14,12 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
 
 #define TPO_R02_MODE(x)		((x) & 7)
 #define TPO_R02_MODE_800x480	7
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 7e4e5bebabbe..6a3ebfcd7223 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -35,8 +35,7 @@
 #include <linux/suspend.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
@@ -196,8 +195,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
 		core.default_display_name = def_disp_name;
 	else if (pdata->default_display_name)
 		core.default_display_name = pdata->default_display_name;
-	else if (pdata->default_device)
-		core.default_display_name = pdata->default_device->name;
 
 	return 0;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index f83608b69e68..535240fba671 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -41,8 +41,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 #include "dispc.h"
@@ -113,9 +112,14 @@ struct dispc_features {
 	 * never both, we can just use this flag for now.
 	 */
 	bool reverse_ilace_field_order:1;
+
+	bool has_gamma_table:1;
+
+	bool has_gamma_i734_bug:1;
 };
 
 #define DISPC_MAX_NR_FIFOS 5
+#define DISPC_MAX_CHANNEL_GAMMA 4
 
 static struct {
 	struct platform_device *pdev;
@@ -135,6 +139,8 @@ static struct {
 	bool ctx_valid;
 	u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
 
+	u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA];
+
 	const struct dispc_features *feat;
 
 	bool is_enabled;
@@ -178,11 +184,19 @@ struct dispc_reg_field {
 	u8 low;
 };
 
+struct dispc_gamma_desc {
+	u32 len;
+	u32 bits;
+	u16 reg;
+	bool has_index;
+};
+
 static const struct {
 	const char *name;
 	u32 vsync_irq;
 	u32 framedone_irq;
 	u32 sync_lost_irq;
+	struct dispc_gamma_desc gamma;
 	struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM];
 } mgr_desc[] = {
 	[OMAP_DSS_CHANNEL_LCD] = {
@@ -190,6 +204,12 @@ static const struct {
190 .vsync_irq = DISPC_IRQ_VSYNC, 204 .vsync_irq = DISPC_IRQ_VSYNC,
191 .framedone_irq = DISPC_IRQ_FRAMEDONE, 205 .framedone_irq = DISPC_IRQ_FRAMEDONE,
192 .sync_lost_irq = DISPC_IRQ_SYNC_LOST, 206 .sync_lost_irq = DISPC_IRQ_SYNC_LOST,
207 .gamma = {
208 .len = 256,
209 .bits = 8,
210 .reg = DISPC_GAMMA_TABLE0,
211 .has_index = true,
212 },
193 .reg_desc = { 213 .reg_desc = {
194 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, 214 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 },
195 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, 215 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 },
@@ -207,6 +227,12 @@ static const struct {
207 .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, 227 .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
208 .framedone_irq = DISPC_IRQ_FRAMEDONETV, 228 .framedone_irq = DISPC_IRQ_FRAMEDONETV,
209 .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, 229 .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT,
230 .gamma = {
231 .len = 1024,
232 .bits = 10,
233 .reg = DISPC_GAMMA_TABLE2,
234 .has_index = false,
235 },
210 .reg_desc = { 236 .reg_desc = {
211 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, 237 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 },
212 [DISPC_MGR_FLD_STNTFT] = { }, 238 [DISPC_MGR_FLD_STNTFT] = { },
@@ -224,6 +250,12 @@ static const struct {
224 .vsync_irq = DISPC_IRQ_VSYNC2, 250 .vsync_irq = DISPC_IRQ_VSYNC2,
225 .framedone_irq = DISPC_IRQ_FRAMEDONE2, 251 .framedone_irq = DISPC_IRQ_FRAMEDONE2,
226 .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, 252 .sync_lost_irq = DISPC_IRQ_SYNC_LOST2,
253 .gamma = {
254 .len = 256,
255 .bits = 8,
256 .reg = DISPC_GAMMA_TABLE1,
257 .has_index = true,
258 },
227 .reg_desc = { 259 .reg_desc = {
228 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, 260 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 },
229 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, 261 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 },
@@ -241,6 +273,12 @@ static const struct {
241 .vsync_irq = DISPC_IRQ_VSYNC3, 273 .vsync_irq = DISPC_IRQ_VSYNC3,
242 .framedone_irq = DISPC_IRQ_FRAMEDONE3, 274 .framedone_irq = DISPC_IRQ_FRAMEDONE3,
243 .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, 275 .sync_lost_irq = DISPC_IRQ_SYNC_LOST3,
276 .gamma = {
277 .len = 256,
278 .bits = 8,
279 .reg = DISPC_GAMMA_TABLE3,
280 .has_index = true,
281 },
244 .reg_desc = { 282 .reg_desc = {
245 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, 283 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 },
246 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, 284 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 },
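The dispc_gamma_desc entries added to mgr_desc above give the write path everything it needs per channel: table length, per-component precision, the data register, and whether the hardware expects an explicit entry index. A minimal standalone sketch of the register-word packing implied by dispc_mgr_write_gamma_table() and dispc_mgr_set_gamma() further down in this patch (the struct and function names here are illustrative, not kernel API; components are assumed already truncated to the descriptor's bit width):

#include <stdint.h>

struct gamma_desc {
	uint32_t len;    /* entries: 256 for the LCD tables, 1024 for TV */
	uint32_t bits;   /* bits per component: 8 (LCD) or 10 (TV) */
	int has_index;   /* LCD tables carry the entry index in bits 31:24 */
};

/*
 * Pack one gamma word: B in the low bits, then G, then R, i.e.
 * (r << 2*bits) | (g << bits) | b. Indexed tables OR the entry number
 * into bits 31:24; the 10-bit TV table instead flags its first entry
 * with bit 31 (presumably resetting the hardware's internal index).
 */
static uint32_t pack_gamma_word(const struct gamma_desc *d, uint32_t i,
				uint32_t r, uint32_t g, uint32_t b)
{
	uint32_t v = (r << (d->bits * 2)) | (g << d->bits) | b;

	if (d->has_index)
		v |= i << 24;
	else if (i == 0)
		v |= 1u << 31;

	return v;
}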
@@ -1084,20 +1122,6 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane plane)
1084 return unit * 8; 1122 return unit * 8;
1085} 1123}
1086 1124
1087void dispc_enable_gamma_table(bool enable)
1088{
1089 /*
1090 * This is partially implemented to support only disabling of
1091 * the gamma table.
1092 */
1093 if (enable) {
1094 DSSWARN("Gamma table enabling for TV not yet supported");
1095 return;
1096 }
1097
1098 REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
1099}
1100
1101static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) 1125static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
1102{ 1126{
1103 if (channel == OMAP_DSS_CHANNEL_DIGIT) 1127 if (channel == OMAP_DSS_CHANNEL_DIGIT)
@@ -3299,30 +3323,21 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
3299 3323
3300static unsigned long dispc_fclk_rate(void) 3324static unsigned long dispc_fclk_rate(void)
3301{ 3325{
3302 struct dss_pll *pll; 3326 unsigned long r;
3303 unsigned long r = 0; 3327 enum dss_clk_source src;
3328
3329 src = dss_get_dispc_clk_source();
3304 3330
3305 switch (dss_get_dispc_clk_source()) { 3331 if (src == DSS_CLK_SRC_FCK) {
3306 case OMAP_DSS_CLK_SRC_FCK:
3307 r = dss_get_dispc_clk_rate(); 3332 r = dss_get_dispc_clk_rate();
3308 break; 3333 } else {
3309 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 3334 struct dss_pll *pll;
3310 pll = dss_pll_find("dsi0"); 3335 unsigned clkout_idx;
3311 if (!pll)
3312 pll = dss_pll_find("video0");
3313 3336
3314 r = pll->cinfo.clkout[0]; 3337 pll = dss_pll_find_by_src(src);
3315 break; 3338 clkout_idx = dss_pll_get_clkout_idx_for_src(src);
3316 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
3317 pll = dss_pll_find("dsi1");
3318 if (!pll)
3319 pll = dss_pll_find("video1");
3320 3339
3321 r = pll->cinfo.clkout[0]; 3340 r = pll->cinfo.clkout[clkout_idx];
3322 break;
3323 default:
3324 BUG();
3325 return 0;
3326 } 3341 }
3327 3342
3328 return r; 3343 return r;
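The old dispc_fclk_rate() hard-coded which named PLL backed each clock source and always read clkout[0]; the new code derives both from the source itself via dss_pll_find_by_src() and dss_pll_get_clkout_idx_for_src(). A plausible standalone model of that second helper, assuming DSS_CLK_SRC_PLLn_m means output m of PLL n (the enum and function below are stand-ins, not the kernel definitions):

enum clk_src {
	SRC_FCK,
	SRC_PLL1_1, SRC_PLL1_2, SRC_PLL1_3,
	SRC_PLL2_1, SRC_PLL2_2, SRC_PLL2_3,
};

/* PLLn_m taps clkout m-1 of PLL n; the plain fclk taps no PLL at all. */
static unsigned clkout_idx_for_src(enum clk_src src)
{
	switch (src) {
	case SRC_PLL1_1: case SRC_PLL2_1: return 0;
	case SRC_PLL1_2: case SRC_PLL2_2: return 1;
	case SRC_PLL1_3: case SRC_PLL2_3: return 2;
	default:                          return 0;
	}
}

With that mapping, the rate lookup collapses to r = pll->cinfo.clkout[clkout_idx], and a new clock source needs a table entry rather than another switch case.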
@@ -3330,43 +3345,31 @@ static unsigned long dispc_fclk_rate(void)
3330 3345
3331static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel) 3346static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
3332{ 3347{
3333 struct dss_pll *pll;
3334 int lcd; 3348 int lcd;
3335 unsigned long r; 3349 unsigned long r;
3336 u32 l; 3350 enum dss_clk_source src;
3337
3338 if (dss_mgr_is_lcd(channel)) {
3339 l = dispc_read_reg(DISPC_DIVISORo(channel));
3340 3351
3341 lcd = FLD_GET(l, 23, 16); 3352 /* for TV, LCLK rate is the FCLK rate */
3353 if (!dss_mgr_is_lcd(channel))
3354 return dispc_fclk_rate();
3342 3355
3343 switch (dss_get_lcd_clk_source(channel)) { 3356 src = dss_get_lcd_clk_source(channel);
3344 case OMAP_DSS_CLK_SRC_FCK:
3345 r = dss_get_dispc_clk_rate();
3346 break;
3347 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
3348 pll = dss_pll_find("dsi0");
3349 if (!pll)
3350 pll = dss_pll_find("video0");
3351 3357
3352 r = pll->cinfo.clkout[0]; 3358 if (src == DSS_CLK_SRC_FCK) {
3353 break; 3359 r = dss_get_dispc_clk_rate();
3354 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 3360 } else {
3355 pll = dss_pll_find("dsi1"); 3361 struct dss_pll *pll;
3356 if (!pll) 3362 unsigned clkout_idx;
3357 pll = dss_pll_find("video1");
3358 3363
3359 r = pll->cinfo.clkout[0]; 3364 pll = dss_pll_find_by_src(src);
3360 break; 3365 clkout_idx = dss_pll_get_clkout_idx_for_src(src);
3361 default:
3362 BUG();
3363 return 0;
3364 }
3365 3366
3366 return r / lcd; 3367 r = pll->cinfo.clkout[clkout_idx];
3367 } else {
3368 return dispc_fclk_rate();
3369 } 3368 }
3369
3370 lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16);
3371
3372 return r / lcd;
3370} 3373}
3371 3374
3372static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) 3375static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
@@ -3426,15 +3429,14 @@ static unsigned long dispc_plane_lclk_rate(enum omap_plane plane)
3426static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) 3429static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
3427{ 3430{
3428 int lcd, pcd; 3431 int lcd, pcd;
3429 enum omap_dss_clk_source lcd_clk_src; 3432 enum dss_clk_source lcd_clk_src;
3430 3433
3431 seq_printf(s, "- %s -\n", mgr_desc[channel].name); 3434 seq_printf(s, "- %s -\n", mgr_desc[channel].name);
3432 3435
3433 lcd_clk_src = dss_get_lcd_clk_source(channel); 3436 lcd_clk_src = dss_get_lcd_clk_source(channel);
3434 3437
3435 seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, 3438 seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name,
3436 dss_get_generic_clk_source_name(lcd_clk_src), 3439 dss_get_clk_source_name(lcd_clk_src));
3437 dss_feat_get_clk_source_name(lcd_clk_src));
3438 3440
3439 dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); 3441 dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd);
3440 3442
@@ -3448,16 +3450,15 @@ void dispc_dump_clocks(struct seq_file *s)
3448{ 3450{
3449 int lcd; 3451 int lcd;
3450 u32 l; 3452 u32 l;
3451 enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); 3453 enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
3452 3454
3453 if (dispc_runtime_get()) 3455 if (dispc_runtime_get())
3454 return; 3456 return;
3455 3457
3456 seq_printf(s, "- DISPC -\n"); 3458 seq_printf(s, "- DISPC -\n");
3457 3459
3458 seq_printf(s, "dispc fclk source = %s (%s)\n", 3460 seq_printf(s, "dispc fclk source = %s\n",
3459 dss_get_generic_clk_source_name(dispc_clk_src), 3461 dss_get_clk_source_name(dispc_clk_src));
3460 dss_feat_get_clk_source_name(dispc_clk_src));
3461 3462
3462 seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate()); 3463 seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
3463 3464
@@ -3814,6 +3815,139 @@ void dispc_disable_sidle(void)
3814 REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */ 3815 REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
3815} 3816}
3816 3817
3818u32 dispc_mgr_gamma_size(enum omap_channel channel)
3819{
3820 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3821
3822 if (!dispc.feat->has_gamma_table)
3823 return 0;
3824
3825 return gdesc->len;
3826}
3827EXPORT_SYMBOL(dispc_mgr_gamma_size);
3828
3829static void dispc_mgr_write_gamma_table(enum omap_channel channel)
3830{
3831 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3832 u32 *table = dispc.gamma_table[channel];
3833 unsigned int i;
3834
3835 DSSDBG("%s: channel %d\n", __func__, channel);
3836
3837 for (i = 0; i < gdesc->len; ++i) {
3838 u32 v = table[i];
3839
3840 if (gdesc->has_index)
3841 v |= i << 24;
3842 else if (i == 0)
3843 v |= 1 << 31;
3844
3845 dispc_write_reg(gdesc->reg, v);
3846 }
3847}
3848
3849static void dispc_restore_gamma_tables(void)
3850{
3851 DSSDBG("%s()\n", __func__);
3852
3853 if (!dispc.feat->has_gamma_table)
3854 return;
3855
3856 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD);
3857
3858 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
3859
3860 if (dss_has_feature(FEAT_MGR_LCD2))
3861 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
3862
3863 if (dss_has_feature(FEAT_MGR_LCD3))
3864 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
3865}
3866
3867static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
3868 { .red = 0, .green = 0, .blue = 0, },
3869 { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
3870};
3871
3872void dispc_mgr_set_gamma(enum omap_channel channel,
3873 const struct drm_color_lut *lut,
3874 unsigned int length)
3875{
3876 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3877 u32 *table = dispc.gamma_table[channel];
3878 uint i;
3879
3880 DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__,
3881 channel, length, gdesc->len);
3882
3883 if (!dispc.feat->has_gamma_table)
3884 return;
3885
3886 if (lut == NULL || length < 2) {
3887 lut = dispc_mgr_gamma_default_lut;
3888 length = ARRAY_SIZE(dispc_mgr_gamma_default_lut);
3889 }
3890
3891 for (i = 0; i < length - 1; ++i) {
3892 uint first = i * (gdesc->len - 1) / (length - 1);
3893 uint last = (i + 1) * (gdesc->len - 1) / (length - 1);
3894 uint w = last - first;
3895 u16 r, g, b;
3896 uint j;
3897
3898 if (w == 0)
3899 continue;
3900
3901 for (j = 0; j <= w; j++) {
3902 r = (lut[i].red * (w - j) + lut[i+1].red * j) / w;
3903 g = (lut[i].green * (w - j) + lut[i+1].green * j) / w;
3904 b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w;
3905
3906 r >>= 16 - gdesc->bits;
3907 g >>= 16 - gdesc->bits;
3908 b >>= 16 - gdesc->bits;
3909
3910 table[first + j] = (r << (gdesc->bits * 2)) |
3911 (g << gdesc->bits) | b;
3912 }
3913 }
3914
3915 if (dispc.is_enabled)
3916 dispc_mgr_write_gamma_table(channel);
3917}
3918EXPORT_SYMBOL(dispc_mgr_set_gamma);
3919
3920static int dispc_init_gamma_tables(void)
3921{
3922 int channel;
3923
3924 if (!dispc.feat->has_gamma_table)
3925 return 0;
3926
3927 for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) {
3928 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3929 u32 *gt;
3930
3931 if (channel == OMAP_DSS_CHANNEL_LCD2 &&
3932 !dss_has_feature(FEAT_MGR_LCD2))
3933 continue;
3934
3935 if (channel == OMAP_DSS_CHANNEL_LCD3 &&
3936 !dss_has_feature(FEAT_MGR_LCD3))
3937 continue;
3938
3939 gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
3940 sizeof(u32), GFP_KERNEL);
3941 if (!gt)
3942 return -ENOMEM;
3943
3944 dispc.gamma_table[channel] = gt;
3945
3946 dispc_mgr_set_gamma(channel, NULL, 0);
3947 }
3948 return 0;
3949}
3950
3817static void _omap_dispc_initial_config(void) 3951static void _omap_dispc_initial_config(void)
3818{ 3952{
3819 u32 l; 3953 u32 l;
@@ -3829,8 +3963,15 @@ static void _omap_dispc_initial_config(void)
3829 dispc.core_clk_rate = dispc_fclk_rate(); 3963 dispc.core_clk_rate = dispc_fclk_rate();
3830 } 3964 }
3831 3965
3832 /* FUNCGATED */ 3966 /* Use gamma table mode, instead of palette mode */
3833 if (dss_has_feature(FEAT_FUNCGATED)) 3967 if (dispc.feat->has_gamma_table)
3968 REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3);
3969
3970 /* For older DSS versions (FEAT_FUNCGATED) this enables
3971 * func-clock auto-gating. For newer versions
3972 * (dispc.feat->has_gamma_table) this enables tv-out gamma tables.
3973 */
3974 if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
3834 REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); 3975 REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
3835 3976
3836 dispc_setup_color_conv_coef(); 3977 dispc_setup_color_conv_coef();
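dispc_mgr_set_gamma() above resamples an arbitrary-length drm_color_lut onto the fixed hardware table by walking the input segments, linearly interpolating between adjacent entries, and truncating each 16-bit component to the table's precision. A self-contained sketch of that resampling (the userspace framing and local struct are assumptions; the two-entry fallback mirrors dispc_mgr_gamma_default_lut in the patch):

#include <stdint.h>

struct lut_entry { uint16_t red, green, blue; };

/* Resample lut[length] onto table[len], bits per component, matching
 * the interpolation loop in dispc_mgr_set_gamma(). */
static void resample_lut(uint32_t *table, unsigned len, unsigned bits,
			 const struct lut_entry *lut, unsigned length)
{
	static const struct lut_entry def[] = {
		{ 0, 0, 0 }, { 0xffff, 0xffff, 0xffff },
	};
	unsigned i;

	if (!lut || length < 2) {	/* fall back to a linear ramp */
		lut = def;
		length = 2;
	}

	for (i = 0; i < length - 1; i++) {
		unsigned first = i * (len - 1) / (length - 1);
		unsigned last = (i + 1) * (len - 1) / (length - 1);
		unsigned w = last - first, j;

		if (w == 0)
			continue;

		for (j = 0; j <= w; j++) {
			uint16_t r = (lut[i].red   * (w - j) + lut[i+1].red   * j) / w;
			uint16_t g = (lut[i].green * (w - j) + lut[i+1].green * j) / w;
			uint16_t b = (lut[i].blue  * (w - j) + lut[i+1].blue  * j) / w;

			r >>= 16 - bits;
			g >>= 16 - bits;
			b >>= 16 - bits;

			table[first + j] = (r << (bits * 2)) | (g << bits) | b;
		}
	}
}

For the default two-entry LUT the outer loop degenerates to a single segment spanning the whole table, i.e. a linear identity ramp, which is what dispc_init_gamma_tables() installs by calling dispc_mgr_set_gamma(channel, NULL, 0).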
@@ -3934,6 +4075,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
3934 .has_writeback = true, 4075 .has_writeback = true,
3935 .supports_double_pixel = true, 4076 .supports_double_pixel = true,
3936 .reverse_ilace_field_order = true, 4077 .reverse_ilace_field_order = true,
4078 .has_gamma_table = true,
4079 .has_gamma_i734_bug = true,
3937}; 4080};
3938 4081
3939static const struct dispc_features omap54xx_dispc_feats = { 4082static const struct dispc_features omap54xx_dispc_feats = {
@@ -3959,6 +4102,8 @@ static const struct dispc_features omap54xx_dispc_feats = {
3959 .has_writeback = true, 4102 .has_writeback = true,
3960 .supports_double_pixel = true, 4103 .supports_double_pixel = true,
3961 .reverse_ilace_field_order = true, 4104 .reverse_ilace_field_order = true,
4105 .has_gamma_table = true,
4106 .has_gamma_i734_bug = true,
3962}; 4107};
3963 4108
3964static int dispc_init_features(struct platform_device *pdev) 4109static int dispc_init_features(struct platform_device *pdev)
@@ -4050,6 +4195,168 @@ void dispc_free_irq(void *dev_id)
4050} 4195}
4051EXPORT_SYMBOL(dispc_free_irq); 4196EXPORT_SYMBOL(dispc_free_irq);
4052 4197
4198/*
4199 * Workaround for errata i734 in DSS dispc
4200 * - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled
4201 *
4202 * For gamma tables to work on LCD1 the GFX plane has to be used at
4203 * least once after DSS HW has come out of reset. The workaround
4204 * sets up a minimal LCD setup with GFX plane and waits for one
4205 * vertical sync irq before disabling the setup and continuing with
4206 * the context restore. The physical outputs are gated during the
 4207 * operation. This workaround requires that the gamma table LOADMODE
 4208 * is set to 0x2 in the DISPC_CONTROL1 register.
4209 *
4210 * For details see:
4211 * OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata
4212 * Literature Number: SWPZ037E
4213 * Or some other relevant errata document for the DSS IP version.
4214 */
4215
4216static const struct dispc_errata_i734_data {
4217 struct omap_video_timings timings;
4218 struct omap_overlay_info ovli;
4219 struct omap_overlay_manager_info mgri;
4220 struct dss_lcd_mgr_config lcd_conf;
4221} i734 = {
4222 .timings = {
4223 .x_res = 8, .y_res = 1,
4224 .pixelclock = 16000000,
4225 .hsw = 8, .hfp = 4, .hbp = 4,
4226 .vsw = 1, .vfp = 1, .vbp = 1,
4227 .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
4228 .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
4229 .interlace = false,
4230 .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
4231 .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
4232 .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
4233 .double_pixel = false,
4234 },
4235 .ovli = {
4236 .screen_width = 1,
4237 .width = 1, .height = 1,
4238 .color_mode = OMAP_DSS_COLOR_RGB24U,
4239 .rotation = OMAP_DSS_ROT_0,
4240 .rotation_type = OMAP_DSS_ROT_DMA,
4241 .mirror = 0,
4242 .pos_x = 0, .pos_y = 0,
4243 .out_width = 0, .out_height = 0,
4244 .global_alpha = 0xff,
4245 .pre_mult_alpha = 0,
4246 .zorder = 0,
4247 },
4248 .mgri = {
4249 .default_color = 0,
4250 .trans_enabled = false,
4251 .partial_alpha_enabled = false,
4252 .cpr_enable = false,
4253 },
4254 .lcd_conf = {
4255 .io_pad_mode = DSS_IO_PAD_MODE_BYPASS,
4256 .stallmode = false,
4257 .fifohandcheck = false,
4258 .clock_info = {
4259 .lck_div = 1,
4260 .pck_div = 2,
4261 },
4262 .video_port_width = 24,
4263 .lcden_sig_polarity = 0,
4264 },
4265};
4266
4267static struct i734_buf {
4268 size_t size;
4269 dma_addr_t paddr;
4270 void *vaddr;
4271} i734_buf;
4272
4273static int dispc_errata_i734_wa_init(void)
4274{
4275 if (!dispc.feat->has_gamma_i734_bug)
4276 return 0;
4277
4278 i734_buf.size = i734.ovli.width * i734.ovli.height *
4279 color_mode_to_bpp(i734.ovli.color_mode) / 8;
4280
4281 i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size,
4282 &i734_buf.paddr, GFP_KERNEL);
4283 if (!i734_buf.vaddr) {
4284 dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed",
4285 __func__);
4286 return -ENOMEM;
4287 }
4288
4289 return 0;
4290}
4291
4292static void dispc_errata_i734_wa_fini(void)
4293{
4294 if (!dispc.feat->has_gamma_i734_bug)
4295 return;
4296
4297 dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr,
4298 i734_buf.paddr);
4299}
4300
4301static void dispc_errata_i734_wa(void)
4302{
4303 u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD);
4304 struct omap_overlay_info ovli;
4305 struct dss_lcd_mgr_config lcd_conf;
4306 u32 gatestate;
4307 unsigned int count;
4308
4309 if (!dispc.feat->has_gamma_i734_bug)
4310 return;
4311
4312 gatestate = REG_GET(DISPC_CONFIG, 8, 4);
4313
4314 ovli = i734.ovli;
4315 ovli.paddr = i734_buf.paddr;
4316 lcd_conf = i734.lcd_conf;
4317
4318 /* Gate all LCD1 outputs */
4319 REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4);
4320
4321 /* Setup and enable GFX plane */
4322 dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
4323 dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
4324 dispc_ovl_enable(OMAP_DSS_GFX, true);
4325
4326 /* Set up and enable display manager for LCD1 */
4327 dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri);
4328 dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
4329 &lcd_conf.clock_info);
4330 dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
4331 dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
4332
4333 dispc_clear_irqstatus(framedone_irq);
4334
 4335 /* Enable and then disable the channel to produce just one frame */
4336 dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true);
4337 dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false);
4338
4339 /* Busy wait for framedone. We can't fiddle with irq handlers
4340 * in PM resume. Typically the loop runs less than 5 times and
 4341 * waits less than a microsecond.
4342 */
4343 count = 0;
4344 while (!(dispc_read_irqstatus() & framedone_irq)) {
4345 if (count++ > 10000) {
4346 dev_err(&dispc.pdev->dev, "%s: framedone timeout\n",
4347 __func__);
4348 break;
4349 }
4350 }
4351 dispc_ovl_enable(OMAP_DSS_GFX, false);
4352
4353 /* Clear all irq bits before continuing */
4354 dispc_clear_irqstatus(0xffffffff);
4355
4356 /* Restore the original state to LCD1 output gates */
4357 REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4);
4358}
4359
4053/* DISPC HW IP initialisation */ 4360/* DISPC HW IP initialisation */
4054static int dispc_bind(struct device *dev, struct device *master, void *data) 4361static int dispc_bind(struct device *dev, struct device *master, void *data)
4055{ 4362{
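The workaround runs in the PM resume path, where registering an interrupt handler is not an option, so it polls the FRAMEDONE status with a bounded busy-wait. The shape of that loop as a compilable sketch (the simulated read_status() stands in for dispc_read_irqstatus(); the 10000 cap mirrors the patch and keeps a wedged frame from hanging resume):

#include <stdint.h>
#include <stdio.h>

#define FRAMEDONE (1u << 0)	/* stand-in for the real framedone irq bit */

/* Simulated irq status: raises FRAMEDONE after a few polls, roughly
 * matching the "less than 5 iterations" the patch comment describes. */
static uint32_t read_status(void)
{
	static unsigned polls;
	return ++polls >= 4 ? FRAMEDONE : 0;
}

/* Bounded busy-wait: 0 on completion, -1 on timeout. */
static int wait_framedone(void)
{
	unsigned count = 0;

	while (!(read_status() & FRAMEDONE)) {
		if (count++ > 10000) {
			fprintf(stderr, "framedone timeout\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return wait_framedone() ? 1 : 0;
}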
@@ -4067,6 +4374,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
4067 if (r) 4374 if (r)
4068 return r; 4375 return r;
4069 4376
4377 r = dispc_errata_i734_wa_init();
4378 if (r)
4379 return r;
4380
4070 dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0); 4381 dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
4071 if (!dispc_mem) { 4382 if (!dispc_mem) {
4072 DSSERR("can't get IORESOURCE_MEM DISPC\n"); 4383 DSSERR("can't get IORESOURCE_MEM DISPC\n");
@@ -4100,6 +4411,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
4100 } 4411 }
4101 } 4412 }
4102 4413
4414 r = dispc_init_gamma_tables();
4415 if (r)
4416 return r;
4417
4103 pm_runtime_enable(&pdev->dev); 4418 pm_runtime_enable(&pdev->dev);
4104 4419
4105 r = dispc_runtime_get(); 4420 r = dispc_runtime_get();
@@ -4127,6 +4442,8 @@ static void dispc_unbind(struct device *dev, struct device *master,
4127 void *data) 4442 void *data)
4128{ 4443{
4129 pm_runtime_disable(dev); 4444 pm_runtime_disable(dev);
4445
4446 dispc_errata_i734_wa_fini();
4130} 4447}
4131 4448
4132static const struct component_ops dispc_component_ops = { 4449static const struct component_ops dispc_component_ops = {
@@ -4169,7 +4486,11 @@ static int dispc_runtime_resume(struct device *dev)
4169 if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) { 4486 if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
4170 _omap_dispc_initial_config(); 4487 _omap_dispc_initial_config();
4171 4488
4489 dispc_errata_i734_wa();
4490
4172 dispc_restore_context(); 4491 dispc_restore_context();
4492
4493 dispc_restore_gamma_tables();
4173 } 4494 }
4174 4495
4175 dispc.is_enabled = true; 4496 dispc.is_enabled = true;
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h
index 483744223dd1..bc1d8126ee87 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.h
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.h
@@ -42,6 +42,11 @@
42#define DISPC_MSTANDBY_CTRL 0x0858 42#define DISPC_MSTANDBY_CTRL 0x0858
43#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C 43#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C
44 44
45#define DISPC_GAMMA_TABLE0 0x0630
46#define DISPC_GAMMA_TABLE1 0x0634
47#define DISPC_GAMMA_TABLE2 0x0638
48#define DISPC_GAMMA_TABLE3 0x0850
49
45/* DISPC overlay registers */ 50/* DISPC overlay registers */
46#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ 51#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
47 DISPC_BA0_OFFSET(n)) 52 DISPC_BA0_OFFSET(n))
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
index 038c15b04215..34fad2376f8d 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <video/omapdss.h>
22 21
22#include "omapdss.h"
23#include "dispc.h" 23#include "dispc.h"
24 24
25static const struct dispc_coef coef3_M8[8] = { 25static const struct dispc_coef coef3_M8[8] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 9f3dd09b0a6c..8dcdd7cf9937 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -28,7 +28,7 @@
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/of.h> 29#include <linux/of.h>
30 30
31#include <video/omapdss.h> 31#include "omapdss.h"
32#include "dss.h" 32#include "dss.h"
33#include "dss_features.h" 33#include "dss_features.h"
34 34
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 97ea60257884..b268295b76cf 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -34,17 +34,15 @@
34#include <linux/clk.h> 34#include <linux/clk.h>
35#include <linux/component.h> 35#include <linux/component.h>
36 36
37#include <video/omapdss.h> 37#include "omapdss.h"
38
39#include "dss.h" 38#include "dss.h"
40#include "dss_features.h" 39#include "dss_features.h"
41 40
42#define HSDIV_DISPC 0
43
44struct dpi_data { 41struct dpi_data {
45 struct platform_device *pdev; 42 struct platform_device *pdev;
46 43
47 struct regulator *vdds_dsi_reg; 44 struct regulator *vdds_dsi_reg;
45 enum dss_clk_source clk_src;
48 struct dss_pll *pll; 46 struct dss_pll *pll;
49 47
50 struct mutex lock; 48 struct mutex lock;
@@ -69,7 +67,7 @@ static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
69 return dev_get_drvdata(&pdev->dev); 67 return dev_get_drvdata(&pdev->dev);
70} 68}
71 69
72static struct dss_pll *dpi_get_pll(enum omap_channel channel) 70static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
73{ 71{
74 /* 72 /*
75 * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL 73 * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
@@ -83,64 +81,51 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel)
83 case OMAPDSS_VER_OMAP3630: 81 case OMAPDSS_VER_OMAP3630:
84 case OMAPDSS_VER_AM35xx: 82 case OMAPDSS_VER_AM35xx:
85 case OMAPDSS_VER_AM43xx: 83 case OMAPDSS_VER_AM43xx:
86 return NULL; 84 return DSS_CLK_SRC_FCK;
87 85
88 case OMAPDSS_VER_OMAP4430_ES1: 86 case OMAPDSS_VER_OMAP4430_ES1:
89 case OMAPDSS_VER_OMAP4430_ES2: 87 case OMAPDSS_VER_OMAP4430_ES2:
90 case OMAPDSS_VER_OMAP4: 88 case OMAPDSS_VER_OMAP4:
91 switch (channel) { 89 switch (channel) {
92 case OMAP_DSS_CHANNEL_LCD: 90 case OMAP_DSS_CHANNEL_LCD:
93 return dss_pll_find("dsi0"); 91 return DSS_CLK_SRC_PLL1_1;
94 case OMAP_DSS_CHANNEL_LCD2: 92 case OMAP_DSS_CHANNEL_LCD2:
95 return dss_pll_find("dsi1"); 93 return DSS_CLK_SRC_PLL2_1;
96 default: 94 default:
97 return NULL; 95 return DSS_CLK_SRC_FCK;
98 } 96 }
99 97
100 case OMAPDSS_VER_OMAP5: 98 case OMAPDSS_VER_OMAP5:
101 switch (channel) { 99 switch (channel) {
102 case OMAP_DSS_CHANNEL_LCD: 100 case OMAP_DSS_CHANNEL_LCD:
103 return dss_pll_find("dsi0"); 101 return DSS_CLK_SRC_PLL1_1;
104 case OMAP_DSS_CHANNEL_LCD3: 102 case OMAP_DSS_CHANNEL_LCD3:
105 return dss_pll_find("dsi1"); 103 return DSS_CLK_SRC_PLL2_1;
104 case OMAP_DSS_CHANNEL_LCD2:
106 default: 105 default:
107 return NULL; 106 return DSS_CLK_SRC_FCK;
108 } 107 }
109 108
110 case OMAPDSS_VER_DRA7xx: 109 case OMAPDSS_VER_DRA7xx:
111 switch (channel) { 110 switch (channel) {
112 case OMAP_DSS_CHANNEL_LCD: 111 case OMAP_DSS_CHANNEL_LCD:
112 return DSS_CLK_SRC_PLL1_1;
113 case OMAP_DSS_CHANNEL_LCD2: 113 case OMAP_DSS_CHANNEL_LCD2:
114 return dss_pll_find("video0"); 114 return DSS_CLK_SRC_PLL1_3;
115 case OMAP_DSS_CHANNEL_LCD3: 115 case OMAP_DSS_CHANNEL_LCD3:
116 return dss_pll_find("video1"); 116 return DSS_CLK_SRC_PLL2_1;
117 default: 117 default:
118 return NULL; 118 return DSS_CLK_SRC_FCK;
119 } 119 }
120 120
121 default: 121 default:
122 return NULL; 122 return DSS_CLK_SRC_FCK;
123 }
124}
125
126static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
127{
128 switch (channel) {
129 case OMAP_DSS_CHANNEL_LCD:
130 return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC;
131 case OMAP_DSS_CHANNEL_LCD2:
132 return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
133 case OMAP_DSS_CHANNEL_LCD3:
134 return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
135 default:
136 /* this shouldn't happen */
137 WARN_ON(1);
138 return OMAP_DSS_CLK_SRC_FCK;
139 } 123 }
140} 124}
141 125
142struct dpi_clk_calc_ctx { 126struct dpi_clk_calc_ctx {
143 struct dss_pll *pll; 127 struct dss_pll *pll;
128 unsigned clkout_idx;
144 129
145 /* inputs */ 130 /* inputs */
146 131
@@ -148,7 +133,7 @@ struct dpi_clk_calc_ctx {
148 133
149 /* outputs */ 134 /* outputs */
150 135
151 struct dss_pll_clock_info dsi_cinfo; 136 struct dss_pll_clock_info pll_cinfo;
152 unsigned long fck; 137 unsigned long fck;
153 struct dispc_clock_info dispc_cinfo; 138 struct dispc_clock_info dispc_cinfo;
154}; 139};
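Where dpi_get_pll() answered "which PLL drives this channel" directly, dpi_get_clk_src() answers the finer-grained "which clock source feeds this channel on this SoC", and the PLL then falls out of dss_pll_find_by_src() in dpi_init_pll() below. The DRA7xx column of that mapping, as a standalone sketch (the enum names are local stand-ins; the FCK value means "no PLL, stay on the functional clock"):

enum channel { LCD, LCD2, LCD3 };
enum clk_src { SRC_FCK, SRC_PLL1_1, SRC_PLL1_3, SRC_PLL2_1 };

/* DRA7xx rows from dpi_get_clk_src(): LCD -> PLL1:1, LCD2 -> PLL1:3,
 * LCD3 -> PLL2:1, anything else falls back to the functional clock. */
static enum clk_src dra7_dpi_clk_src(enum channel ch)
{
	switch (ch) {
	case LCD:  return SRC_PLL1_1;
	case LCD2: return SRC_PLL1_3;
	case LCD3: return SRC_PLL2_1;
	default:   return SRC_FCK;
	}
}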
@@ -193,8 +178,8 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
193 if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000) 178 if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000)
194 return false; 179 return false;
195 180
196 ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; 181 ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
197 ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; 182 ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
198 183
199 return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max, 184 return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
200 dpi_calc_dispc_cb, ctx); 185 dpi_calc_dispc_cb, ctx);
@@ -207,12 +192,12 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
207{ 192{
208 struct dpi_clk_calc_ctx *ctx = data; 193 struct dpi_clk_calc_ctx *ctx = data;
209 194
210 ctx->dsi_cinfo.n = n; 195 ctx->pll_cinfo.n = n;
211 ctx->dsi_cinfo.m = m; 196 ctx->pll_cinfo.m = m;
212 ctx->dsi_cinfo.fint = fint; 197 ctx->pll_cinfo.fint = fint;
213 ctx->dsi_cinfo.clkdco = clkdco; 198 ctx->pll_cinfo.clkdco = clkdco;
214 199
215 return dss_pll_hsdiv_calc(ctx->pll, clkdco, 200 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
216 ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), 201 ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
217 dpi_calc_hsdiv_cb, ctx); 202 dpi_calc_hsdiv_cb, ctx);
218} 203}
@@ -227,25 +212,39 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
227 dpi_calc_dispc_cb, ctx); 212 dpi_calc_dispc_cb, ctx);
228} 213}
229 214
230static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck, 215static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
231 struct dpi_clk_calc_ctx *ctx) 216 struct dpi_clk_calc_ctx *ctx)
232{ 217{
233 unsigned long clkin; 218 unsigned long clkin;
234 unsigned long pll_min, pll_max;
235 219
236 memset(ctx, 0, sizeof(*ctx)); 220 memset(ctx, 0, sizeof(*ctx));
237 ctx->pll = dpi->pll; 221 ctx->pll = dpi->pll;
238 ctx->pck_min = pck - 1000; 222 ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
239 ctx->pck_max = pck + 1000;
240 223
241 pll_min = 0; 224 clkin = clk_get_rate(dpi->pll->clkin);
242 pll_max = 0;
243 225
244 clkin = clk_get_rate(ctx->pll->clkin); 226 if (dpi->pll->hw->type == DSS_PLL_TYPE_A) {
227 unsigned long pll_min, pll_max;
245 228
246 return dss_pll_calc(ctx->pll, clkin, 229 ctx->pck_min = pck - 1000;
247 pll_min, pll_max, 230 ctx->pck_max = pck + 1000;
248 dpi_calc_pll_cb, ctx); 231
232 pll_min = 0;
233 pll_max = 0;
234
235 return dss_pll_calc_a(ctx->pll, clkin,
236 pll_min, pll_max,
237 dpi_calc_pll_cb, ctx);
238 } else { /* DSS_PLL_TYPE_B */
239 dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo);
240
241 ctx->dispc_cinfo.lck_div = 1;
242 ctx->dispc_cinfo.pck_div = 1;
243 ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0];
244 ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck;
245
246 return true;
247 }
249} 248}
250 249
251static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) 250static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
@@ -279,7 +278,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
279 278
280 279
281 280
282static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, 281static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
283 unsigned long pck_req, unsigned long *fck, int *lck_div, 282 unsigned long pck_req, unsigned long *fck, int *lck_div,
284 int *pck_div) 283 int *pck_div)
285{ 284{
@@ -287,20 +286,19 @@ static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
287 int r; 286 int r;
288 bool ok; 287 bool ok;
289 288
290 ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx); 289 ok = dpi_pll_clk_calc(dpi, pck_req, &ctx);
291 if (!ok) 290 if (!ok)
292 return -EINVAL; 291 return -EINVAL;
293 292
294 r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo); 293 r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo);
295 if (r) 294 if (r)
296 return r; 295 return r;
297 296
298 dss_select_lcd_clk_source(channel, 297 dss_select_lcd_clk_source(channel, dpi->clk_src);
299 dpi_get_alt_clk_src(channel));
300 298
301 dpi->mgr_config.clock_info = ctx.dispc_cinfo; 299 dpi->mgr_config.clock_info = ctx.dispc_cinfo;
302 300
303 *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; 301 *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
304 *lck_div = ctx.dispc_cinfo.lck_div; 302 *lck_div = ctx.dispc_cinfo.lck_div;
305 *pck_div = ctx.dispc_cinfo.pck_div; 303 *pck_div = ctx.dispc_cinfo.pck_div;
306 304
@@ -342,7 +340,7 @@ static int dpi_set_mode(struct dpi_data *dpi)
342 int r = 0; 340 int r = 0;
343 341
344 if (dpi->pll) 342 if (dpi->pll)
345 r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck, 343 r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
346 &lck_div, &pck_div); 344 &lck_div, &pck_div);
347 else 345 else
348 r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck, 346 r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
@@ -419,7 +417,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
419 if (dpi->pll) { 417 if (dpi->pll) {
420 r = dss_pll_enable(dpi->pll); 418 r = dss_pll_enable(dpi->pll);
421 if (r) 419 if (r)
422 goto err_dsi_pll_init; 420 goto err_pll_init;
423 } 421 }
424 422
425 r = dpi_set_mode(dpi); 423 r = dpi_set_mode(dpi);
@@ -442,7 +440,7 @@ err_mgr_enable:
442err_set_mode: 440err_set_mode:
443 if (dpi->pll) 441 if (dpi->pll)
444 dss_pll_disable(dpi->pll); 442 dss_pll_disable(dpi->pll);
445err_dsi_pll_init: 443err_pll_init:
446err_src_sel: 444err_src_sel:
447 dispc_runtime_put(); 445 dispc_runtime_put();
448err_get_dispc: 446err_get_dispc:
@@ -465,7 +463,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
465 dss_mgr_disable(channel); 463 dss_mgr_disable(channel);
466 464
467 if (dpi->pll) { 465 if (dpi->pll) {
468 dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); 466 dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
469 dss_pll_disable(dpi->pll); 467 dss_pll_disable(dpi->pll);
470 } 468 }
471 469
@@ -524,11 +522,11 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
524 return -EINVAL; 522 return -EINVAL;
525 523
526 if (dpi->pll) { 524 if (dpi->pll) {
527 ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx); 525 ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
528 if (!ok) 526 if (!ok)
529 return -EINVAL; 527 return -EINVAL;
530 528
531 fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; 529 fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
532 } else { 530 } else {
533 ok = dpi_dss_clk_calc(timings->pixelclock, &ctx); 531 ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
534 if (!ok) 532 if (!ok)
@@ -558,7 +556,7 @@ static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
558 mutex_unlock(&dpi->lock); 556 mutex_unlock(&dpi->lock);
559} 557}
560 558
561static int dpi_verify_dsi_pll(struct dss_pll *pll) 559static int dpi_verify_pll(struct dss_pll *pll)
562{ 560{
563 int r; 561 int r;
564 562
@@ -602,16 +600,14 @@ static void dpi_init_pll(struct dpi_data *dpi)
602 if (dpi->pll) 600 if (dpi->pll)
603 return; 601 return;
604 602
605 pll = dpi_get_pll(dpi->output.dispc_channel); 603 dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel);
604
605 pll = dss_pll_find_by_src(dpi->clk_src);
606 if (!pll) 606 if (!pll)
607 return; 607 return;
608 608
609 /* On DRA7 we need to set a mux to use the PLL */ 609 if (dpi_verify_pll(pll)) {
610 if (omapdss_get_version() == OMAPDSS_VER_DRA7xx) 610 DSSWARN("PLL not operational\n");
611 dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel);
612
613 if (dpi_verify_dsi_pll(pll)) {
614 DSSWARN("DSI PLL not operational\n");
615 return; 611 return;
616 } 612 }
617 613
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 8730646a0cbb..6f45e9d00b41 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -42,9 +42,9 @@
42#include <linux/of_platform.h> 42#include <linux/of_platform.h>
43#include <linux/component.h> 43#include <linux/component.h>
44 44
45#include <video/omapdss.h>
46#include <video/mipi_display.h> 45#include <video/mipi_display.h>
47 46
47#include "omapdss.h"
48#include "dss.h" 48#include "dss.h"
49#include "dss_features.h" 49#include "dss_features.h"
50 50
@@ -1180,15 +1180,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
1180 return PTR_ERR(vdds_dsi); 1180 return PTR_ERR(vdds_dsi);
1181 } 1181 }
1182 1182
1183 if (regulator_can_change_voltage(vdds_dsi)) {
1184 r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
1185 if (r) {
1186 devm_regulator_put(vdds_dsi);
1187 DSSERR("can't set the DSI regulator voltage\n");
1188 return r;
1189 }
1190 }
1191
1192 dsi->vdds_dsi_reg = vdds_dsi; 1183 dsi->vdds_dsi_reg = vdds_dsi;
1193 1184
1194 return 0; 1185 return 0;
@@ -1271,7 +1262,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1271 unsigned long r; 1262 unsigned long r;
1272 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1263 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1273 1264
1274 if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) { 1265 if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) {
1275 /* DSI FCLK source is DSS_CLK_FCK */ 1266 /* DSI FCLK source is DSS_CLK_FCK */
1276 r = clk_get_rate(dsi->dss_clk); 1267 r = clk_get_rate(dsi->dss_clk);
1277 } else { 1268 } else {
@@ -1484,7 +1475,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1484{ 1475{
1485 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1476 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1486 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; 1477 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
1487 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; 1478 enum dss_clk_source dispc_clk_src, dsi_clk_src;
1488 int dsi_module = dsi->module_id; 1479 int dsi_module = dsi->module_id;
1489 struct dss_pll *pll = &dsi->pll; 1480 struct dss_pll *pll = &dsi->pll;
1490 1481
@@ -1504,28 +1495,27 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1504 cinfo->clkdco, cinfo->m); 1495 cinfo->clkdco, cinfo->m);
1505 1496
1506 seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n", 1497 seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
1507 dss_feat_get_clk_source_name(dsi_module == 0 ? 1498 dss_get_clk_source_name(dsi_module == 0 ?
1508 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : 1499 DSS_CLK_SRC_PLL1_1 :
1509 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC), 1500 DSS_CLK_SRC_PLL2_1),
1510 cinfo->clkout[HSDIV_DISPC], 1501 cinfo->clkout[HSDIV_DISPC],
1511 cinfo->mX[HSDIV_DISPC], 1502 cinfo->mX[HSDIV_DISPC],
1512 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ? 1503 dispc_clk_src == DSS_CLK_SRC_FCK ?
1513 "off" : "on"); 1504 "off" : "on");
1514 1505
1515 seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n", 1506 seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
1516 dss_feat_get_clk_source_name(dsi_module == 0 ? 1507 dss_get_clk_source_name(dsi_module == 0 ?
1517 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : 1508 DSS_CLK_SRC_PLL1_2 :
1518 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI), 1509 DSS_CLK_SRC_PLL2_2),
1519 cinfo->clkout[HSDIV_DSI], 1510 cinfo->clkout[HSDIV_DSI],
1520 cinfo->mX[HSDIV_DSI], 1511 cinfo->mX[HSDIV_DSI],
1521 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ? 1512 dsi_clk_src == DSS_CLK_SRC_FCK ?
1522 "off" : "on"); 1513 "off" : "on");
1523 1514
1524 seq_printf(s, "- DSI%d -\n", dsi_module + 1); 1515 seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1525 1516
1526 seq_printf(s, "dsi fclk source = %s (%s)\n", 1517 seq_printf(s, "dsi fclk source = %s\n",
1527 dss_get_generic_clk_source_name(dsi_clk_src), 1518 dss_get_clk_source_name(dsi_clk_src));
1528 dss_feat_get_clk_source_name(dsi_clk_src));
1529 1519
1530 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); 1520 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1531 1521
@@ -4111,8 +4101,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
4111 int r; 4101 int r;
4112 4102
4113 dss_select_lcd_clk_source(channel, dsi->module_id == 0 ? 4103 dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
4114 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : 4104 DSS_CLK_SRC_PLL1_1 :
4115 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC); 4105 DSS_CLK_SRC_PLL2_1);
4116 4106
4117 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) { 4107 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
4118 r = dss_mgr_register_framedone_handler(channel, 4108 r = dss_mgr_register_framedone_handler(channel,
@@ -4159,7 +4149,7 @@ err1:
4159 dss_mgr_unregister_framedone_handler(channel, 4149 dss_mgr_unregister_framedone_handler(channel,
4160 dsi_framedone_irq_callback, dsidev); 4150 dsi_framedone_irq_callback, dsidev);
4161err: 4151err:
4162 dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); 4152 dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
4163 return r; 4153 return r;
4164} 4154}
4165 4155
@@ -4172,7 +4162,7 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
4172 dss_mgr_unregister_framedone_handler(channel, 4162 dss_mgr_unregister_framedone_handler(channel,
4173 dsi_framedone_irq_callback, dsidev); 4163 dsi_framedone_irq_callback, dsidev);
4174 4164
4175 dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); 4165 dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
4176} 4166}
4177 4167
4178static int dsi_configure_dsi_clocks(struct platform_device *dsidev) 4168static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
@@ -4206,8 +4196,8 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
4206 goto err1; 4196 goto err1;
4207 4197
4208 dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ? 4198 dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
4209 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : 4199 DSS_CLK_SRC_PLL1_2 :
4210 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI); 4200 DSS_CLK_SRC_PLL2_2);
4211 4201
4212 DSSDBG("PLL OK\n"); 4202 DSSDBG("PLL OK\n");
4213 4203
@@ -4239,7 +4229,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
4239err3: 4229err3:
4240 dsi_cio_uninit(dsidev); 4230 dsi_cio_uninit(dsidev);
4241err2: 4231err2:
4242 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); 4232 dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
4243err1: 4233err1:
4244 dss_pll_disable(&dsi->pll); 4234 dss_pll_disable(&dsi->pll);
4245err0: 4235err0:
@@ -4261,7 +4251,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev,
4261 dsi_vc_enable(dsidev, 2, 0); 4251 dsi_vc_enable(dsidev, 2, 0);
4262 dsi_vc_enable(dsidev, 3, 0); 4252 dsi_vc_enable(dsidev, 3, 0);
4263 4253
4264 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); 4254 dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
4265 dsi_cio_uninit(dsidev); 4255 dsi_cio_uninit(dsidev);
4266 dsi_pll_uninit(dsidev, disconnect_lanes); 4256 dsi_pll_uninit(dsidev, disconnect_lanes);
4267} 4257}
@@ -4462,7 +4452,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
4462 ctx->dsi_cinfo.fint = fint; 4452 ctx->dsi_cinfo.fint = fint;
4463 ctx->dsi_cinfo.clkdco = clkdco; 4453 ctx->dsi_cinfo.clkdco = clkdco;
4464 4454
4465 return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, 4455 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
4466 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), 4456 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
4467 dsi_cm_calc_hsdiv_cb, ctx); 4457 dsi_cm_calc_hsdiv_cb, ctx);
4468} 4458}
@@ -4501,7 +4491,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
4501 pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4); 4491 pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
4502 pll_max = cfg->hs_clk_max * 4; 4492 pll_max = cfg->hs_clk_max * 4;
4503 4493
4504 return dss_pll_calc(ctx->pll, clkin, 4494 return dss_pll_calc_a(ctx->pll, clkin,
4505 pll_min, pll_max, 4495 pll_min, pll_max,
4506 dsi_cm_calc_pll_cb, ctx); 4496 dsi_cm_calc_pll_cb, ctx);
4507} 4497}
@@ -4760,7 +4750,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
4760 ctx->dsi_cinfo.fint = fint; 4750 ctx->dsi_cinfo.fint = fint;
4761 ctx->dsi_cinfo.clkdco = clkdco; 4751 ctx->dsi_cinfo.clkdco = clkdco;
4762 4752
4763 return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, 4753 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
4764 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), 4754 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
4765 dsi_vm_calc_hsdiv_cb, ctx); 4755 dsi_vm_calc_hsdiv_cb, ctx);
4766} 4756}
@@ -4802,7 +4792,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
4802 pll_max = byteclk_max * 4 * 4; 4792 pll_max = byteclk_max * 4 * 4;
4803 } 4793 }
4804 4794
4805 return dss_pll_calc(ctx->pll, clkin, 4795 return dss_pll_calc_a(ctx->pll, clkin,
4806 pll_min, pll_max, 4796 pll_min, pll_max,
4807 dsi_vm_calc_pll_cb, ctx); 4797 dsi_vm_calc_pll_cb, ctx);
4808} 4798}
@@ -5148,6 +5138,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
5148}; 5138};
5149 5139
5150static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { 5140static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
5141 .type = DSS_PLL_TYPE_A,
5142
5151 .n_max = (1 << 7) - 1, 5143 .n_max = (1 << 7) - 1,
5152 .m_max = (1 << 11) - 1, 5144 .m_max = (1 << 11) - 1,
5153 .mX_max = (1 << 4) - 1, 5145 .mX_max = (1 << 4) - 1,
@@ -5173,6 +5165,8 @@ static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
5173}; 5165};
5174 5166
5175static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { 5167static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
5168 .type = DSS_PLL_TYPE_A,
5169
5176 .n_max = (1 << 8) - 1, 5170 .n_max = (1 << 8) - 1,
5177 .m_max = (1 << 12) - 1, 5171 .m_max = (1 << 12) - 1,
5178 .mX_max = (1 << 5) - 1, 5172 .mX_max = (1 << 5) - 1,
@@ -5198,6 +5192,8 @@ static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
5198}; 5192};
5199 5193
5200static const struct dss_pll_hw dss_omap5_dsi_pll_hw = { 5194static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
5195 .type = DSS_PLL_TYPE_A,
5196
5201 .n_max = (1 << 8) - 1, 5197 .n_max = (1 << 8) - 1,
5202 .m_max = (1 << 12) - 1, 5198 .m_max = (1 << 12) - 1,
5203 .mX_max = (1 << 5) - 1, 5199 .mX_max = (1 << 5) - 1,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index bf407b6ba15c..dfd4e9621e3b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -18,8 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20 20
21#include <video/omapdss.h> 21#include "omapdss.h"
22
23#include "dss.h" 22#include "dss.h"
24 23
25struct device_node * 24struct device_node *
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index f95ff319e68e..14887d5b02e5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -30,6 +30,7 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/clk.h> 32#include <linux/clk.h>
33#include <linux/pinctrl/consumer.h>
33#include <linux/platform_device.h> 34#include <linux/platform_device.h>
34#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
35#include <linux/gfp.h> 36#include <linux/gfp.h>
@@ -41,8 +42,7 @@
41#include <linux/suspend.h> 42#include <linux/suspend.h>
42#include <linux/component.h> 43#include <linux/component.h>
43 44
44#include <video/omapdss.h> 45#include "omapdss.h"
45
46#include "dss.h" 46#include "dss.h"
47#include "dss_features.h" 47#include "dss_features.h"
48 48
@@ -75,6 +75,8 @@ struct dss_features {
75 const enum omap_display_type *ports; 75 const enum omap_display_type *ports;
76 int num_ports; 76 int num_ports;
77 int (*dpi_select_source)(int port, enum omap_channel channel); 77 int (*dpi_select_source)(int port, enum omap_channel channel);
78 int (*select_lcd_source)(enum omap_channel channel,
79 enum dss_clk_source clk_src);
78}; 80};
79 81
80static struct { 82static struct {
@@ -91,9 +93,9 @@ static struct {
91 unsigned long cache_prate; 93 unsigned long cache_prate;
92 struct dispc_clock_info cache_dispc_cinfo; 94 struct dispc_clock_info cache_dispc_cinfo;
93 95
94 enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI]; 96 enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
95 enum omap_dss_clk_source dispc_clk_source; 97 enum dss_clk_source dispc_clk_source;
96 enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; 98 enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
97 99
98 bool ctx_valid; 100 bool ctx_valid;
99 u32 ctx[DSS_SZ_REGS / sizeof(u32)]; 101 u32 ctx[DSS_SZ_REGS / sizeof(u32)];
@@ -105,11 +107,14 @@ static struct {
105} dss; 107} dss;
106 108
107static const char * const dss_generic_clk_source_names[] = { 109static const char * const dss_generic_clk_source_names[] = {
108 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", 110 [DSS_CLK_SRC_FCK] = "FCK",
109 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", 111 [DSS_CLK_SRC_PLL1_1] = "PLL1:1",
110 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", 112 [DSS_CLK_SRC_PLL1_2] = "PLL1:2",
111 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC", 113 [DSS_CLK_SRC_PLL1_3] = "PLL1:3",
112 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI", 114 [DSS_CLK_SRC_PLL2_1] = "PLL2:1",
115 [DSS_CLK_SRC_PLL2_2] = "PLL2:2",
116 [DSS_CLK_SRC_PLL2_3] = "PLL2:3",
117 [DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL",
113}; 118};
114 119
115static bool dss_initialized; 120static bool dss_initialized;
@@ -202,68 +207,70 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
202 1 << shift, val << shift); 207 1 << shift, val << shift);
203} 208}
204 209
205void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, 210static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
206 enum omap_channel channel) 211 enum omap_channel channel)
207{ 212{
208 unsigned shift, val; 213 unsigned shift, val;
209 214
210 if (!dss.syscon_pll_ctrl) 215 if (!dss.syscon_pll_ctrl)
211 return; 216 return -EINVAL;
212 217
213 switch (channel) { 218 switch (channel) {
214 case OMAP_DSS_CHANNEL_LCD: 219 case OMAP_DSS_CHANNEL_LCD:
215 shift = 3; 220 shift = 3;
216 221
217 switch (pll_id) { 222 switch (clk_src) {
218 case DSS_PLL_VIDEO1: 223 case DSS_CLK_SRC_PLL1_1:
219 val = 0; break; 224 val = 0; break;
220 case DSS_PLL_HDMI: 225 case DSS_CLK_SRC_HDMI_PLL:
221 val = 1; break; 226 val = 1; break;
222 default: 227 default:
223 DSSERR("error in PLL mux config for LCD\n"); 228 DSSERR("error in PLL mux config for LCD\n");
224 return; 229 return -EINVAL;
225 } 230 }
226 231
227 break; 232 break;
228 case OMAP_DSS_CHANNEL_LCD2: 233 case OMAP_DSS_CHANNEL_LCD2:
229 shift = 5; 234 shift = 5;
230 235
231 switch (pll_id) { 236 switch (clk_src) {
232 case DSS_PLL_VIDEO1: 237 case DSS_CLK_SRC_PLL1_3:
233 val = 0; break; 238 val = 0; break;
234 case DSS_PLL_VIDEO2: 239 case DSS_CLK_SRC_PLL2_3:
235 val = 1; break; 240 val = 1; break;
236 case DSS_PLL_HDMI: 241 case DSS_CLK_SRC_HDMI_PLL:
237 val = 2; break; 242 val = 2; break;
238 default: 243 default:
239 DSSERR("error in PLL mux config for LCD2\n"); 244 DSSERR("error in PLL mux config for LCD2\n");
240 return; 245 return -EINVAL;
241 } 246 }
242 247
243 break; 248 break;
244 case OMAP_DSS_CHANNEL_LCD3: 249 case OMAP_DSS_CHANNEL_LCD3:
245 shift = 7; 250 shift = 7;
246 251
247 switch (pll_id) { 252 switch (clk_src) {
248 case DSS_PLL_VIDEO1: 253 case DSS_CLK_SRC_PLL2_1:
249 val = 1; break;
250 case DSS_PLL_VIDEO2:
251 val = 0; break; 254 val = 0; break;
252 case DSS_PLL_HDMI: 255 case DSS_CLK_SRC_PLL1_3:
256 val = 1; break;
257 case DSS_CLK_SRC_HDMI_PLL:
253 val = 2; break; 258 val = 2; break;
254 default: 259 default:
255 DSSERR("error in PLL mux config for LCD3\n"); 260 DSSERR("error in PLL mux config for LCD3\n");
256 return; 261 return -EINVAL;
257 } 262 }
258 263
259 break; 264 break;
260 default: 265 default:
261 DSSERR("error in PLL mux config\n"); 266 DSSERR("error in PLL mux config\n");
262 return; 267 return -EINVAL;
263 } 268 }
264 269
265 regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset, 270 regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
266 0x3 << shift, val << shift); 271 0x3 << shift, val << shift);
272
273 return 0;
267} 274}
268 275
269void dss_sdi_init(int datapairs) 276void dss_sdi_init(int datapairs)
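dss_ctrl_pll_set_control_mux() now keys on the clock source rather than the PLL id, and it encodes a 2-bit mux field per LCD channel at shifts 3, 5 and 7 of the syscon PLL control register. A condensed standalone model of that encoding (channel/source enums are local stand-ins; unroutable pairs return a negative value just as the helper now returns -EINVAL):

enum channel { LCD, LCD2, LCD3 };
enum clk_src { PLL1_1, PLL1_3, PLL2_1, PLL2_3, HDMI_PLL };

/* Field position and mux value for (channel, source); -1 when the pair
 * is not routable. Rows follow the switch in dss_ctrl_pll_set_control_mux(). */
static int pll_mux_field(enum channel ch, enum clk_src src,
			 unsigned *shift, unsigned *val)
{
	switch (ch) {
	case LCD:	/* bits 4:3 */
		*shift = 3;
		if (src == PLL1_1)        *val = 0;
		else if (src == HDMI_PLL) *val = 1;
		else return -1;
		return 0;
	case LCD2:	/* bits 6:5 */
		*shift = 5;
		if (src == PLL1_3)        *val = 0;
		else if (src == PLL2_3)   *val = 1;
		else if (src == HDMI_PLL) *val = 2;
		else return -1;
		return 0;
	case LCD3:	/* bits 8:7 */
		*shift = 7;
		if (src == PLL2_1)        *val = 0;
		else if (src == PLL1_3)   *val = 1;
		else if (src == HDMI_PLL) *val = 2;
		else return -1;
		return 0;
	}
	return -1;
}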
@@ -353,14 +360,14 @@ void dss_sdi_disable(void)
353 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ 360 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
354} 361}
355 362
356const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) 363const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
357{ 364{
358 return dss_generic_clk_source_names[clk_src]; 365 return dss_generic_clk_source_names[clk_src];
359} 366}
360 367
361void dss_dump_clocks(struct seq_file *s) 368void dss_dump_clocks(struct seq_file *s)
362{ 369{
363 const char *fclk_name, *fclk_real_name; 370 const char *fclk_name;
364 unsigned long fclk_rate; 371 unsigned long fclk_rate;
365 372
366 if (dss_runtime_get()) 373 if (dss_runtime_get())
@@ -368,12 +375,11 @@ void dss_dump_clocks(struct seq_file *s)
368 375
369 seq_printf(s, "- DSS -\n"); 376 seq_printf(s, "- DSS -\n");
370 377
371 fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); 378 fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK);
372 fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
373 fclk_rate = clk_get_rate(dss.dss_clk); 379 fclk_rate = clk_get_rate(dss.dss_clk);
374 380
375 seq_printf(s, "%s (%s) = %lu\n", 381 seq_printf(s, "%s = %lu\n",
376 fclk_name, fclk_real_name, 382 fclk_name,
377 fclk_rate); 383 fclk_rate);
378 384
379 dss_runtime_put(); 385 dss_runtime_put();
@@ -402,19 +408,42 @@ static void dss_dump_regs(struct seq_file *s)
402#undef DUMPREG 408#undef DUMPREG
403} 409}
404 410
405static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) 411static int dss_get_channel_index(enum omap_channel channel)
412{
413 switch (channel) {
414 case OMAP_DSS_CHANNEL_LCD:
415 return 0;
416 case OMAP_DSS_CHANNEL_LCD2:
417 return 1;
418 case OMAP_DSS_CHANNEL_LCD3:
419 return 2;
420 default:
421 WARN_ON(1);
422 return 0;
423 }
424}
425
426static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
406{ 427{
407 int b; 428 int b;
408 u8 start, end; 429 u8 start, end;
409 430
431 /*
432 * We always use PRCM clock as the DISPC func clock, except on DSS3,
433 * where we don't have separate DISPC and LCD clock sources.
434 */
435 if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) &&
436 clk_src != DSS_CLK_SRC_FCK))
437 return;
438
410 switch (clk_src) { 439 switch (clk_src) {
411 case OMAP_DSS_CLK_SRC_FCK: 440 case DSS_CLK_SRC_FCK:
412 b = 0; 441 b = 0;
413 break; 442 break;
414 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 443 case DSS_CLK_SRC_PLL1_1:
415 b = 1; 444 b = 1;
416 break; 445 break;
417 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 446 case DSS_CLK_SRC_PLL2_1:
418 b = 2; 447 b = 2;
419 break; 448 break;
420 default: 449 default:
@@ -430,19 +459,19 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
430} 459}
431 460
432void dss_select_dsi_clk_source(int dsi_module, 461void dss_select_dsi_clk_source(int dsi_module,
433 enum omap_dss_clk_source clk_src) 462 enum dss_clk_source clk_src)
434{ 463{
435 int b, pos; 464 int b, pos;
436 465
437 switch (clk_src) { 466 switch (clk_src) {
438 case OMAP_DSS_CLK_SRC_FCK: 467 case DSS_CLK_SRC_FCK:
439 b = 0; 468 b = 0;
440 break; 469 break;
441 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: 470 case DSS_CLK_SRC_PLL1_2:
442 BUG_ON(dsi_module != 0); 471 BUG_ON(dsi_module != 0);
443 b = 1; 472 b = 1;
444 break; 473 break;
445 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI: 474 case DSS_CLK_SRC_PLL2_2:
446 BUG_ON(dsi_module != 1); 475 BUG_ON(dsi_module != 1);
447 b = 1; 476 b = 1;
448 break; 477 break;
@@ -457,59 +486,125 @@ void dss_select_dsi_clk_source(int dsi_module,
457 dss.dsi_clk_source[dsi_module] = clk_src; 486 dss.dsi_clk_source[dsi_module] = clk_src;
458} 487}
459 488
489static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
490 enum dss_clk_source clk_src)
491{
492 const u8 ctrl_bits[] = {
493 [OMAP_DSS_CHANNEL_LCD] = 0,
494 [OMAP_DSS_CHANNEL_LCD2] = 12,
495 [OMAP_DSS_CHANNEL_LCD3] = 19,
496 };
497
498 u8 ctrl_bit = ctrl_bits[channel];
499 int r;
500
501 if (clk_src == DSS_CLK_SRC_FCK) {
502 /* LCDx_CLK_SWITCH */
503 REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
504 return -EINVAL;
505 }
506
507 r = dss_ctrl_pll_set_control_mux(clk_src, channel);
508 if (r)
509 return r;
510
511 REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
512
513 return 0;
514}
515
516static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
517 enum dss_clk_source clk_src)
518{
519 const u8 ctrl_bits[] = {
520 [OMAP_DSS_CHANNEL_LCD] = 0,
521 [OMAP_DSS_CHANNEL_LCD2] = 12,
522 [OMAP_DSS_CHANNEL_LCD3] = 19,
523 };
524 const enum dss_clk_source allowed_plls[] = {
525 [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
526 [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK,
527 [OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1,
528 };
529
530 u8 ctrl_bit = ctrl_bits[channel];
531
532 if (clk_src == DSS_CLK_SRC_FCK) {
533 /* LCDx_CLK_SWITCH */
534 REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
535 return -EINVAL;
536 }
537
538 if (WARN_ON(allowed_plls[channel] != clk_src))
539 return -EINVAL;
540
541 REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
542
543 return 0;
544}
545
546static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
547 enum dss_clk_source clk_src)
548{
549 const u8 ctrl_bits[] = {
550 [OMAP_DSS_CHANNEL_LCD] = 0,
551 [OMAP_DSS_CHANNEL_LCD2] = 12,
552 };
553 const enum dss_clk_source allowed_plls[] = {
554 [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
555 [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1,
556 };
557
558 u8 ctrl_bit = ctrl_bits[channel];
559
560 if (clk_src == DSS_CLK_SRC_FCK) {
561 /* LCDx_CLK_SWITCH */
562 REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
563 return 0;
564 }
565
566 if (WARN_ON(allowed_plls[channel] != clk_src))
567 return -EINVAL;
568
569 REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
570
571 return 0;
572}
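/*
 * Illustrative note, not part of the patch: the three dss_lcd_clk_mux_*()
 * variants above share one pattern -- look up the channel's
 * LCDx_CLK_SWITCH bit in DSS_CONTROL, validate the requested PLL
 * output, then flip the mux:
 *
 *	REG_FLD_MOD(DSS_CONTROL, 0, bit, bit);	value 0: DSS_FCLK
 *	REG_FLD_MOD(DSS_CONTROL, 1, bit, bit);	value 1: PLL clock output
 *
 * Only the DRA7 variant additionally calls
 * dss_ctrl_pll_set_control_mux(), since DRA7 has a further mux that
 * selects which PLL output feeds the channel; OMAP4/OMAP5 instead
 * hard-code the single allowed PLL per channel in allowed_plls[].
 */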
573
460void dss_select_lcd_clk_source(enum omap_channel channel, 574void dss_select_lcd_clk_source(enum omap_channel channel,
461 enum omap_dss_clk_source clk_src) 575 enum dss_clk_source clk_src)
462{ 576{
463 int b, ix, pos; 577 int idx = dss_get_channel_index(channel);
578 int r;
464 579
465 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) { 580 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
466 dss_select_dispc_clk_source(clk_src); 581 dss_select_dispc_clk_source(clk_src);
582 dss.lcd_clk_source[idx] = clk_src;
467 return; 583 return;
468 } 584 }
469 585
470 switch (clk_src) { 586 r = dss.feat->select_lcd_source(channel, clk_src);
471 case OMAP_DSS_CLK_SRC_FCK: 587 if (r)
472 b = 0;
473 break;
474 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
475 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
476 b = 1;
477 break;
478 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
479 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 &&
480 channel != OMAP_DSS_CHANNEL_LCD3);
481 b = 1;
482 break;
483 default:
484 BUG();
485 return; 588 return;
486 }
487
488 pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
489 (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19);
490 REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */
491 589
492 ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 590 dss.lcd_clk_source[idx] = clk_src;
493 (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
494 dss.lcd_clk_source[ix] = clk_src;
495} 591}
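/*
 * Minimal usage sketch, assuming a caller inside this driver (not part
 * of the patch): the per-SoC hook is reached through the feature table,
 * and the cached source is only updated on success, e.g.
 *
 *	dss_select_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2, DSS_CLK_SRC_PLL2_1);
 *
 * dispatches to dss.feat->select_lcd_source(), i.e. one of the
 * dss_lcd_clk_mux_*() helpers above.
 */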
496 592
497enum omap_dss_clk_source dss_get_dispc_clk_source(void) 593enum dss_clk_source dss_get_dispc_clk_source(void)
498{ 594{
499 return dss.dispc_clk_source; 595 return dss.dispc_clk_source;
500} 596}
501 597
502enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) 598enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
503{ 599{
504 return dss.dsi_clk_source[dsi_module]; 600 return dss.dsi_clk_source[dsi_module];
505} 601}
506 602
507enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) 603enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
508{ 604{
509 if (dss_has_feature(FEAT_LCD_CLK_SRC)) { 605 if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
510 int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 606 int idx = dss_get_channel_index(channel);
511 (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); 607 return dss.lcd_clk_source[idx];
512 return dss.lcd_clk_source[ix];
513 } else { 608 } else {
514 /* LCD_CLK source is the same as DISPC_FCLK source for 609 /* LCD_CLK source is the same as DISPC_FCLK source for
515 * OMAP2 and OMAP3 */ 610 * OMAP2 and OMAP3 */
@@ -858,6 +953,7 @@ static const struct dss_features omap44xx_dss_feats = {
858 .dpi_select_source = &dss_dpi_select_source_omap4, 953 .dpi_select_source = &dss_dpi_select_source_omap4,
859 .ports = omap2plus_ports, 954 .ports = omap2plus_ports,
860 .num_ports = ARRAY_SIZE(omap2plus_ports), 955 .num_ports = ARRAY_SIZE(omap2plus_ports),
956 .select_lcd_source = &dss_lcd_clk_mux_omap4,
861}; 957};
862 958
863static const struct dss_features omap54xx_dss_feats = { 959static const struct dss_features omap54xx_dss_feats = {
@@ -867,6 +963,7 @@ static const struct dss_features omap54xx_dss_feats = {
867 .dpi_select_source = &dss_dpi_select_source_omap5, 963 .dpi_select_source = &dss_dpi_select_source_omap5,
868 .ports = omap2plus_ports, 964 .ports = omap2plus_ports,
869 .num_ports = ARRAY_SIZE(omap2plus_ports), 965 .num_ports = ARRAY_SIZE(omap2plus_ports),
966 .select_lcd_source = &dss_lcd_clk_mux_omap5,
870}; 967};
871 968
872static const struct dss_features am43xx_dss_feats = { 969static const struct dss_features am43xx_dss_feats = {
@@ -885,6 +982,7 @@ static const struct dss_features dra7xx_dss_feats = {
885 .dpi_select_source = &dss_dpi_select_source_dra7xx, 982 .dpi_select_source = &dss_dpi_select_source_dra7xx,
886 .ports = dra7xx_ports, 983 .ports = dra7xx_ports,
887 .num_ports = ARRAY_SIZE(dra7xx_ports), 984 .num_ports = ARRAY_SIZE(dra7xx_ports),
985 .select_lcd_source = &dss_lcd_clk_mux_dra7,
888}; 986};
889 987
890static int dss_init_features(struct platform_device *pdev) 988static int dss_init_features(struct platform_device *pdev)
@@ -1142,18 +1240,18 @@ static int dss_bind(struct device *dev)
1142 /* Select DPLL */ 1240 /* Select DPLL */
1143 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); 1241 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
1144 1242
1145 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 1243 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
1146 1244
1147#ifdef CONFIG_OMAP2_DSS_VENC 1245#ifdef CONFIG_OMAP2_DSS_VENC
1148 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ 1246 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
1149 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ 1247 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
1150 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ 1248 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
1151#endif 1249#endif
1152 dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; 1250 dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
1153 dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; 1251 dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
1154 dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; 1252 dss.dispc_clk_source = DSS_CLK_SRC_FCK;
1155 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; 1253 dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
1156 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; 1254 dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
1157 1255
1158 rev = dss_read_reg(DSS_REVISION); 1256 rev = dss_read_reg(DSS_REVISION);
1159 printk(KERN_INFO "OMAP DSS rev %d.%d\n", 1257 printk(KERN_INFO "OMAP DSS rev %d.%d\n",
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 38e6ab50142d..4fd06dc41cb3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -102,6 +102,20 @@ enum dss_writeback_channel {
102 DSS_WB_LCD3_MGR = 7, 102 DSS_WB_LCD3_MGR = 7,
103}; 103};
104 104
105enum dss_clk_source {
106 DSS_CLK_SRC_FCK = 0,
107
108 DSS_CLK_SRC_PLL1_1,
109 DSS_CLK_SRC_PLL1_2,
110 DSS_CLK_SRC_PLL1_3,
111
112 DSS_CLK_SRC_PLL2_1,
113 DSS_CLK_SRC_PLL2_2,
114 DSS_CLK_SRC_PLL2_3,
115
116 DSS_CLK_SRC_HDMI_PLL,
117};
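/*
 * Illustrative note, not part of the patch: the new enum names clock
 * sources by PLL output rather than by consumer, so e.g.
 * OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC maps to DSS_CLK_SRC_PLL1_1 and
 * OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI to DSS_CLK_SRC_PLL2_2, as seen in
 * the dss.c conversions above. The SoC-neutral names also cover the
 * HDMI PLL, which had no value in the old enum.
 */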
118
105enum dss_pll_id { 119enum dss_pll_id {
106 DSS_PLL_DSI1, 120 DSS_PLL_DSI1,
107 DSS_PLL_DSI2, 121 DSS_PLL_DSI2,
@@ -114,6 +128,11 @@ struct dss_pll;
114 128
115#define DSS_PLL_MAX_HSDIVS 4 129#define DSS_PLL_MAX_HSDIVS 4
116 130
131enum dss_pll_type {
132 DSS_PLL_TYPE_A,
133 DSS_PLL_TYPE_B,
134};
135
117/* 136/*
118 * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7. 137 * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7.
119 * Type-B PLLs: clkout[0] refers to m2. 138 * Type-B PLLs: clkout[0] refers to m2.
@@ -140,6 +159,8 @@ struct dss_pll_ops {
140}; 159};
141 160
142struct dss_pll_hw { 161struct dss_pll_hw {
162 enum dss_pll_type type;
163
143 unsigned n_max; 164 unsigned n_max;
144 unsigned m_min; 165 unsigned m_min;
145 unsigned m_max; 166 unsigned m_max;
@@ -227,7 +248,7 @@ unsigned long dss_get_dispc_clk_rate(void);
227int dss_dpi_select_source(int port, enum omap_channel channel); 248int dss_dpi_select_source(int port, enum omap_channel channel);
228void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); 249void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
229enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); 250enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
230const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); 251const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
231void dss_dump_clocks(struct seq_file *s); 252void dss_dump_clocks(struct seq_file *s);
232 253
233/* DSS VIDEO PLL */ 254/* DSS VIDEO PLL */
@@ -244,20 +265,18 @@ void dss_debug_dump_clocks(struct seq_file *s);
244#endif 265#endif
245 266
246void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable); 267void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
247void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
248 enum omap_channel channel);
249 268
250void dss_sdi_init(int datapairs); 269void dss_sdi_init(int datapairs);
251int dss_sdi_enable(void); 270int dss_sdi_enable(void);
252void dss_sdi_disable(void); 271void dss_sdi_disable(void);
253 272
254void dss_select_dsi_clk_source(int dsi_module, 273void dss_select_dsi_clk_source(int dsi_module,
255 enum omap_dss_clk_source clk_src); 274 enum dss_clk_source clk_src);
256void dss_select_lcd_clk_source(enum omap_channel channel, 275void dss_select_lcd_clk_source(enum omap_channel channel,
257 enum omap_dss_clk_source clk_src); 276 enum dss_clk_source clk_src);
258enum omap_dss_clk_source dss_get_dispc_clk_source(void); 277enum dss_clk_source dss_get_dispc_clk_source(void);
259enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module); 278enum dss_clk_source dss_get_dsi_clk_source(int dsi_module);
260enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); 279enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
261 280
262void dss_set_venc_output(enum omap_dss_venc_type type); 281void dss_set_venc_output(enum omap_dss_venc_type type);
263void dss_set_dac_pwrdn_bgz(bool enable); 282void dss_set_dac_pwrdn_bgz(bool enable);
@@ -409,17 +428,23 @@ typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
409int dss_pll_register(struct dss_pll *pll); 428int dss_pll_register(struct dss_pll *pll);
410void dss_pll_unregister(struct dss_pll *pll); 429void dss_pll_unregister(struct dss_pll *pll);
411struct dss_pll *dss_pll_find(const char *name); 430struct dss_pll *dss_pll_find(const char *name);
431struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src);
432unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
412int dss_pll_enable(struct dss_pll *pll); 433int dss_pll_enable(struct dss_pll *pll);
413void dss_pll_disable(struct dss_pll *pll); 434void dss_pll_disable(struct dss_pll *pll);
414int dss_pll_set_config(struct dss_pll *pll, 435int dss_pll_set_config(struct dss_pll *pll,
415 const struct dss_pll_clock_info *cinfo); 436 const struct dss_pll_clock_info *cinfo);
416 437
417bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, 438bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
418 unsigned long out_min, unsigned long out_max, 439 unsigned long out_min, unsigned long out_max,
419 dss_hsdiv_calc_func func, void *data); 440 dss_hsdiv_calc_func func, void *data);
420bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, 441bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
421 unsigned long pll_min, unsigned long pll_max, 442 unsigned long pll_min, unsigned long pll_max,
422 dss_pll_calc_func func, void *data); 443 dss_pll_calc_func func, void *data);
444
445bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
446 unsigned long target_clkout, struct dss_pll_clock_info *cinfo);
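/*
 * Minimal usage sketch, not part of the patch: type-A PLLs keep the
 * iterative divider search (dss_pll_calc_a() invokes a callback per
 * n/m candidate), while type-B PLLs compute one configuration
 * directly:
 *
 *	struct dss_pll_clock_info cinfo;
 *
 *	if (dss_pll_calc_b(pll, clk_get_rate(pll->clkin),
 *			   target_clkout, &cinfo))
 *		dss_pll_set_config(pll, &cinfo);
 */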
447
423int dss_pll_write_config_type_a(struct dss_pll *pll, 448int dss_pll_write_config_type_a(struct dss_pll *pll,
424 const struct dss_pll_clock_info *cinfo); 449 const struct dss_pll_clock_info *cinfo);
425int dss_pll_write_config_type_b(struct dss_pll *pll, 450int dss_pll_write_config_type_b(struct dss_pll *pll,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c
index c886a2927f73..ee5b93ce2763 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c
@@ -23,8 +23,7 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25 25
26#include <video/omapdss.h> 26#include "omapdss.h"
27
28#include "dss.h" 27#include "dss.h"
29#include "dss_features.h" 28#include "dss_features.h"
30 29
@@ -50,7 +49,6 @@ struct omap_dss_features {
50 const enum omap_dss_output_id *supported_outputs; 49 const enum omap_dss_output_id *supported_outputs;
51 const enum omap_color_mode *supported_color_modes; 50 const enum omap_color_mode *supported_color_modes;
52 const enum omap_overlay_caps *overlay_caps; 51 const enum omap_overlay_caps *overlay_caps;
53 const char * const *clksrc_names;
54 const struct dss_param_range *dss_params; 52 const struct dss_param_range *dss_params;
55 53
56 const enum omap_dss_rotation_type supported_rotation_types; 54 const enum omap_dss_rotation_type supported_rotation_types;
@@ -389,34 +387,6 @@ static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
389 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, 387 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
390}; 388};
391 389
392static const char * const omap2_dss_clk_source_names[] = {
393 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A",
394 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A",
395 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1",
396};
397
398static const char * const omap3_dss_clk_source_names[] = {
399 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK",
400 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK",
401 [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK",
402};
403
404static const char * const omap4_dss_clk_source_names[] = {
405 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1",
406 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2",
407 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK",
408 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1",
409 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2",
410};
411
412static const char * const omap5_dss_clk_source_names[] = {
413 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DPLL_DSI1_A_CLK1",
414 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DPLL_DSI1_A_CLK2",
415 [OMAP_DSS_CLK_SRC_FCK] = "DSS_CLK",
416 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1",
417 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DPLL_DSI1_C_CLK2",
418};
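/*
 * Illustrative note, not part of the patch: the per-SoC name tables are
 * dropped along with the consumer-based enum; dss_get_clk_source_name()
 * (declared in dss.h above) can presumably name a dss_clk_source value
 * directly, since the new names are SoC-neutral. Its body is outside
 * this diff.
 */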
419
420static const struct dss_param_range omap2_dss_param_range[] = { 390static const struct dss_param_range omap2_dss_param_range[] = {
421 [FEAT_PARAM_DSS_FCK] = { 0, 133000000 }, 391 [FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
422 [FEAT_PARAM_DSS_PCD] = { 2, 255 }, 392 [FEAT_PARAM_DSS_PCD] = { 2, 255 },
@@ -631,7 +601,6 @@ static const struct omap_dss_features omap2_dss_features = {
631 .supported_outputs = omap2_dss_supported_outputs, 601 .supported_outputs = omap2_dss_supported_outputs,
632 .supported_color_modes = omap2_dss_supported_color_modes, 602 .supported_color_modes = omap2_dss_supported_color_modes,
633 .overlay_caps = omap2_dss_overlay_caps, 603 .overlay_caps = omap2_dss_overlay_caps,
634 .clksrc_names = omap2_dss_clk_source_names,
635 .dss_params = omap2_dss_param_range, 604 .dss_params = omap2_dss_param_range,
636 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, 605 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
637 .buffer_size_unit = 1, 606 .buffer_size_unit = 1,
@@ -652,7 +621,6 @@ static const struct omap_dss_features omap3430_dss_features = {
652 .supported_outputs = omap3430_dss_supported_outputs, 621 .supported_outputs = omap3430_dss_supported_outputs,
653 .supported_color_modes = omap3_dss_supported_color_modes, 622 .supported_color_modes = omap3_dss_supported_color_modes,
654 .overlay_caps = omap3430_dss_overlay_caps, 623 .overlay_caps = omap3430_dss_overlay_caps,
655 .clksrc_names = omap3_dss_clk_source_names,
656 .dss_params = omap3_dss_param_range, 624 .dss_params = omap3_dss_param_range,
657 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, 625 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
658 .buffer_size_unit = 1, 626 .buffer_size_unit = 1,
@@ -676,7 +644,6 @@ static const struct omap_dss_features am35xx_dss_features = {
676 .supported_outputs = omap3430_dss_supported_outputs, 644 .supported_outputs = omap3430_dss_supported_outputs,
677 .supported_color_modes = omap3_dss_supported_color_modes, 645 .supported_color_modes = omap3_dss_supported_color_modes,
678 .overlay_caps = omap3430_dss_overlay_caps, 646 .overlay_caps = omap3430_dss_overlay_caps,
679 .clksrc_names = omap3_dss_clk_source_names,
680 .dss_params = omap3_dss_param_range, 647 .dss_params = omap3_dss_param_range,
681 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, 648 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
682 .buffer_size_unit = 1, 649 .buffer_size_unit = 1,
@@ -696,7 +663,6 @@ static const struct omap_dss_features am43xx_dss_features = {
696 .supported_outputs = am43xx_dss_supported_outputs, 663 .supported_outputs = am43xx_dss_supported_outputs,
697 .supported_color_modes = omap3_dss_supported_color_modes, 664 .supported_color_modes = omap3_dss_supported_color_modes,
698 .overlay_caps = omap3430_dss_overlay_caps, 665 .overlay_caps = omap3430_dss_overlay_caps,
699 .clksrc_names = omap2_dss_clk_source_names,
700 .dss_params = am43xx_dss_param_range, 666 .dss_params = am43xx_dss_param_range,
701 .supported_rotation_types = OMAP_DSS_ROT_DMA, 667 .supported_rotation_types = OMAP_DSS_ROT_DMA,
702 .buffer_size_unit = 1, 668 .buffer_size_unit = 1,
@@ -716,7 +682,6 @@ static const struct omap_dss_features omap3630_dss_features = {
716 .supported_outputs = omap3630_dss_supported_outputs, 682 .supported_outputs = omap3630_dss_supported_outputs,
717 .supported_color_modes = omap3_dss_supported_color_modes, 683 .supported_color_modes = omap3_dss_supported_color_modes,
718 .overlay_caps = omap3630_dss_overlay_caps, 684 .overlay_caps = omap3630_dss_overlay_caps,
719 .clksrc_names = omap3_dss_clk_source_names,
720 .dss_params = omap3_dss_param_range, 685 .dss_params = omap3_dss_param_range,
721 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, 686 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
722 .buffer_size_unit = 1, 687 .buffer_size_unit = 1,
@@ -738,7 +703,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
738 .supported_outputs = omap4_dss_supported_outputs, 703 .supported_outputs = omap4_dss_supported_outputs,
739 .supported_color_modes = omap4_dss_supported_color_modes, 704 .supported_color_modes = omap4_dss_supported_color_modes,
740 .overlay_caps = omap4_dss_overlay_caps, 705 .overlay_caps = omap4_dss_overlay_caps,
741 .clksrc_names = omap4_dss_clk_source_names,
742 .dss_params = omap4_dss_param_range, 706 .dss_params = omap4_dss_param_range,
743 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, 707 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
744 .buffer_size_unit = 16, 708 .buffer_size_unit = 16,
@@ -759,7 +723,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
759 .supported_outputs = omap4_dss_supported_outputs, 723 .supported_outputs = omap4_dss_supported_outputs,
760 .supported_color_modes = omap4_dss_supported_color_modes, 724 .supported_color_modes = omap4_dss_supported_color_modes,
761 .overlay_caps = omap4_dss_overlay_caps, 725 .overlay_caps = omap4_dss_overlay_caps,
762 .clksrc_names = omap4_dss_clk_source_names,
763 .dss_params = omap4_dss_param_range, 726 .dss_params = omap4_dss_param_range,
764 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, 727 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
765 .buffer_size_unit = 16, 728 .buffer_size_unit = 16,
@@ -780,7 +743,6 @@ static const struct omap_dss_features omap4_dss_features = {
780 .supported_outputs = omap4_dss_supported_outputs, 743 .supported_outputs = omap4_dss_supported_outputs,
781 .supported_color_modes = omap4_dss_supported_color_modes, 744 .supported_color_modes = omap4_dss_supported_color_modes,
782 .overlay_caps = omap4_dss_overlay_caps, 745 .overlay_caps = omap4_dss_overlay_caps,
783 .clksrc_names = omap4_dss_clk_source_names,
784 .dss_params = omap4_dss_param_range, 746 .dss_params = omap4_dss_param_range,
785 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, 747 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
786 .buffer_size_unit = 16, 748 .buffer_size_unit = 16,
@@ -801,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = {
801 .supported_outputs = omap5_dss_supported_outputs, 763 .supported_outputs = omap5_dss_supported_outputs,
802 .supported_color_modes = omap4_dss_supported_color_modes, 764 .supported_color_modes = omap4_dss_supported_color_modes,
803 .overlay_caps = omap4_dss_overlay_caps, 765 .overlay_caps = omap4_dss_overlay_caps,
804 .clksrc_names = omap5_dss_clk_source_names,
805 .dss_params = omap5_dss_param_range, 766 .dss_params = omap5_dss_param_range,
806 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, 767 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
807 .buffer_size_unit = 16, 768 .buffer_size_unit = 16,
@@ -859,11 +820,6 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
859 color_mode; 820 color_mode;
860} 821}
861 822
862const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
863{
864 return omap_current_dss_features->clksrc_names[id];
865}
866
867u32 dss_feat_get_buffer_size_unit(void) 823u32 dss_feat_get_buffer_size_unit(void)
868{ 824{
869 return omap_current_dss_features->buffer_size_unit; 825 return omap_current_dss_features->buffer_size_unit;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h
index 3d67d39f192f..bb4b7f0e642b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h
@@ -91,7 +91,6 @@ unsigned long dss_feat_get_param_max(enum dss_range_param param);
91enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane); 91enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
92bool dss_feat_color_mode_supported(enum omap_plane plane, 92bool dss_feat_color_mode_supported(enum omap_plane plane,
93 enum omap_color_mode color_mode); 93 enum omap_color_mode color_mode);
94const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
95 94
96u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ 95u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
97u32 dss_feat_get_burst_size_unit(void); /* in bytes */ 96u32 dss_feat_get_burst_size_unit(void); /* in bytes */
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 53616b02b613..63e711545865 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -23,8 +23,9 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/hdmi.h> 25#include <linux/hdmi.h>
26#include <video/omapdss.h> 26#include <sound/omap-hdmi-audio.h>
27 27
28#include "omapdss.h"
28#include "dss.h" 29#include "dss.h"
29 30
30/* HDMI Wrapper */ 31/* HDMI Wrapper */
@@ -240,6 +241,7 @@ struct hdmi_pll_data {
240 241
241 void __iomem *base; 242 void __iomem *base;
242 243
244 struct platform_device *pdev;
243 struct hdmi_wp_data *wp; 245 struct hdmi_wp_data *wp;
244}; 246};
245 247
@@ -306,8 +308,6 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
306 308
307/* HDMI PLL funcs */ 309/* HDMI PLL funcs */
308void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s); 310void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
309void hdmi_pll_compute(struct hdmi_pll_data *pll,
310 unsigned long target_tmds, struct dss_pll_clock_info *pi);
311int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, 311int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
312 struct hdmi_wp_data *wp); 312 struct hdmi_wp_data *wp);
313void hdmi_pll_uninit(struct hdmi_pll_data *hpll); 313void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index f892ae157ff3..cbd28dfdb86a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -33,9 +33,10 @@
33#include <linux/gpio.h> 33#include <linux/gpio.h>
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35#include <linux/component.h> 35#include <linux/component.h>
36#include <video/omapdss.h> 36#include <linux/of.h>
37#include <sound/omap-hdmi-audio.h> 37#include <sound/omap-hdmi-audio.h>
38 38
39#include "omapdss.h"
39#include "hdmi4_core.h" 40#include "hdmi4_core.h"
40#include "dss.h" 41#include "dss.h"
41#include "dss_features.h" 42#include "dss_features.h"
@@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
100 101
101static int hdmi_init_regulator(void) 102static int hdmi_init_regulator(void)
102{ 103{
103 int r;
104 struct regulator *reg; 104 struct regulator *reg;
105 105
106 if (hdmi.vdda_reg != NULL) 106 if (hdmi.vdda_reg != NULL)
@@ -114,15 +114,6 @@ static int hdmi_init_regulator(void)
114 return PTR_ERR(reg); 114 return PTR_ERR(reg);
115 } 115 }
116 116
117 if (regulator_can_change_voltage(reg)) {
118 r = regulator_set_voltage(reg, 1800000, 1800000);
119 if (r) {
120 devm_regulator_put(reg);
121 DSSWARN("can't set the regulator voltage\n");
122 return r;
123 }
124 }
125
126 hdmi.vdda_reg = reg; 117 hdmi.vdda_reg = reg;
127 118
128 return 0; 119 return 0;
@@ -186,7 +177,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
186 if (p->double_pixel) 177 if (p->double_pixel)
187 pc *= 2; 178 pc *= 2;
188 179
189 hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); 180 /* DSS_HDMI_TCLK is bitclk / 10 */
181 pc *= 10;
182
183 dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
184 pc, &hdmi_cinfo);
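/*
 * Worked example, not part of the patch: for a 1080p60-class mode the
 * pixel clock is 148.5 MHz, so the PLL target becomes
 * 148.5 MHz * 10 = 1.485 GHz -- the TMDS bit clock -- which the
 * DSS_HDMI_TCLK tap then divides back down by 10.
 */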
190 185
191 r = dss_pll_enable(&hdmi.pll.pll); 186 r = dss_pll_enable(&hdmi.pll.pll);
192 if (r) { 187 if (r) {
@@ -213,9 +208,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
213 208
214 hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); 209 hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
215 210
216 /* bypass TV gamma table */
217 dispc_enable_gamma_table(0);
218
219 /* tv size */ 211 /* tv size */
220 dss_mgr_set_timings(channel, p); 212 dss_mgr_set_timings(channel, p);
221 213
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index fa72e735dad2..ef3afe99e487 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
211static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) 211static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
212{ 212{
213 DSSDBG("Enter hdmi_core_powerdown_disable\n"); 213 DSSDBG("Enter hdmi_core_powerdown_disable\n");
214 REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0); 214 REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
215} 215}
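/*
 * Note (my reading, not part of the patch): bit 0 of
 * HDMI_CORE_SYS_SYS_CTRL1 is the active-low core power-down control,
 * so writing 0x0 here left the core powered down; writing 0x1 releases
 * power-down, matching what hdmi_core_powerdown_disable() is meant
 * to do.
 */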
216 216
217static void hdmi_core_swreset_release(struct hdmi_core_data *core) 217static void hdmi_core_swreset_release(struct hdmi_core_data *core)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index a43f7b10e113..061f9bab4c9b 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -38,9 +38,10 @@
38#include <linux/gpio.h> 38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h> 39#include <linux/regulator/consumer.h>
40#include <linux/component.h> 40#include <linux/component.h>
41#include <video/omapdss.h> 41#include <linux/of.h>
42#include <sound/omap-hdmi-audio.h> 42#include <sound/omap-hdmi-audio.h>
43 43
44#include "omapdss.h"
44#include "hdmi5_core.h" 45#include "hdmi5_core.h"
45#include "dss.h" 46#include "dss.h"
46#include "dss_features.h" 47#include "dss_features.h"
@@ -131,15 +132,6 @@ static int hdmi_init_regulator(void)
131 return PTR_ERR(reg); 132 return PTR_ERR(reg);
132 } 133 }
133 134
134 if (regulator_can_change_voltage(reg)) {
135 r = regulator_set_voltage(reg, 1800000, 1800000);
136 if (r) {
137 devm_regulator_put(reg);
138 DSSWARN("can't set the regulator voltage\n");
139 return r;
140 }
141 }
142
143 hdmi.vdda_reg = reg; 135 hdmi.vdda_reg = reg;
144 136
145 return 0; 137 return 0;
@@ -198,7 +190,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
198 if (p->double_pixel) 190 if (p->double_pixel)
199 pc *= 2; 191 pc *= 2;
200 192
201 hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); 193 /* DSS_HDMI_TCLK is bitclk / 10 */
194 pc *= 10;
195
196 dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
197 pc, &hdmi_cinfo);
202 198
203 /* disable and clear irqs */ 199 /* disable and clear irqs */
204 hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); 200 hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
@@ -230,9 +226,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
230 226
231 hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); 227 hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
232 228
233 /* bypass TV gamma table */
234 dispc_enable_gamma_table(0);
235
236 /* tv size */ 229 /* tv size */
237 dss_mgr_set_timings(channel, p); 230 dss_mgr_set_timings(channel, p);
238 231
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 6a397520cae5..8ab2093daa12 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
51{ 51{
52 void __iomem *base = core->base; 52 void __iomem *base = core->base;
53 const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ 53 const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
54 const unsigned ss_scl_high = 4000; /* ns */ 54 const unsigned ss_scl_high = 4600; /* ns */
55 const unsigned ss_scl_low = 4700; /* ns */ 55 const unsigned ss_scl_low = 5400; /* ns */
56 const unsigned fs_scl_high = 600; /* ns */ 56 const unsigned fs_scl_high = 600; /* ns */
57 const unsigned fs_scl_low = 1300; /* ns */ 57 const unsigned fs_scl_low = 1300; /* ns */
58 const unsigned sda_hold = 1000; /* ns */ 58 const unsigned sda_hold = 1000; /* ns */
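/*
 * Note (my reading, not part of the patch): 4000/4700 ns are the bare
 * I2C standard-mode minimums for SCL high/low; raising them to
 * 4600/5400 ns adds margin so SCL stays within spec after the divider
 * is derived from the 266 MHz ICLK by integer rounding.
 */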
@@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
458 458
459 c = (ptr[1] >> 6) & 0x3; 459 c = (ptr[1] >> 6) & 0x3;
460 m = (ptr[1] >> 4) & 0x3; 460 m = (ptr[1] >> 4) & 0x3;
461 r = (ptr[1] >> 0) & 0x3; 461 r = (ptr[1] >> 0) & 0xf;
462 462
463 itc = (ptr[2] >> 7) & 0x1; 463 itc = (ptr[2] >> 7) & 0x1;
464 ec = (ptr[2] >> 4) & 0x7; 464 ec = (ptr[2] >> 4) & 0x7;
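/*
 * Note, not part of the patch: in a CEA-861 AVI InfoFrame, R (the
 * active format aspect ratio) is a 4-bit field in data byte 2, so the
 * old 0x3 mask truncated it to two bits; 0xf forwards the whole field.
 */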
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
index 1b8fcc6c4ba1..4dfb67fe5f6d 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
@@ -4,8 +4,8 @@
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/err.h> 5#include <linux/err.h>
6#include <linux/of.h> 6#include <linux/of.h>
7#include <video/omapdss.h>
8 7
8#include "omapdss.h"
9#include "hdmi.h" 9#include "hdmi.h"
10 10
11int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, 11int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index 1f5d19c119ce..3ead47cccac5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -13,8 +13,9 @@
13#include <linux/io.h> 13#include <linux/io.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <video/omapdss.h> 16#include <linux/seq_file.h>
17 17
18#include "omapdss.h"
18#include "dss.h" 19#include "dss.h"
19#include "hdmi.h" 20#include "hdmi.h"
20 21
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 06e23a7c432c..b8bf6a9e5557 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -16,9 +16,10 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/seq_file.h>
20#include <linux/pm_runtime.h>
19 21
20#include <video/omapdss.h> 22#include "omapdss.h"
21
22#include "dss.h" 23#include "dss.h"
23#include "hdmi.h" 24#include "hdmi.h"
24 25
@@ -38,71 +39,14 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
38 DUMPPLL(PLLCTRL_CFG4); 39 DUMPPLL(PLLCTRL_CFG4);
39} 40}
40 41
41void hdmi_pll_compute(struct hdmi_pll_data *pll,
42 unsigned long target_tmds, struct dss_pll_clock_info *pi)
43{
44 unsigned long fint, clkdco, clkout;
45 unsigned long target_bitclk, target_clkdco;
46 unsigned long min_dco;
47 unsigned n, m, mf, m2, sd;
48 unsigned long clkin;
49 const struct dss_pll_hw *hw = pll->pll.hw;
50
51 clkin = clk_get_rate(pll->pll.clkin);
52
53 DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds);
54
55 target_bitclk = target_tmds * 10;
56
57 /* Fint */
58 n = DIV_ROUND_UP(clkin, hw->fint_max);
59 fint = clkin / n;
60
61 /* adjust m2 so that the clkdco will be high enough */
62 min_dco = roundup(hw->clkdco_min, fint);
63 m2 = DIV_ROUND_UP(min_dco, target_bitclk);
64 if (m2 == 0)
65 m2 = 1;
66
67 target_clkdco = target_bitclk * m2;
68 m = target_clkdco / fint;
69
70 clkdco = fint * m;
71
72 /* adjust clkdco with fractional mf */
73 if (WARN_ON(target_clkdco - clkdco > fint))
74 mf = 0;
75 else
76 mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
77
78 if (mf > 0)
79 clkdco += (u32)div_u64((u64)mf * fint, 262144);
80
81 clkout = clkdco / m2;
82
83 /* sigma-delta */
84 sd = DIV_ROUND_UP(fint * m, 250000000);
85
86 DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
87 n, m, mf, m2, sd);
88 DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
89
90 pi->n = n;
91 pi->m = m;
92 pi->mf = mf;
93 pi->mX[0] = m2;
94 pi->sd = sd;
95
96 pi->fint = fint;
97 pi->clkdco = clkdco;
98 pi->clkout[0] = clkout;
99}
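/*
 * Illustrative note, not part of the patch: the removed
 * hdmi_pll_compute() derived N (pre-divider for Fint), M and M.f
 * (integer/fractional DCO multiplier), M2 (output divider) and SD
 * (sigma-delta) from the target bit clock; its callers now use the
 * generic dss_pll_calc_b(), which presumably carries the same type-B
 * math (its body is outside this diff).
 */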
100
101static int hdmi_pll_enable(struct dss_pll *dsspll) 42static int hdmi_pll_enable(struct dss_pll *dsspll)
102{ 43{
103 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); 44 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
104 struct hdmi_wp_data *wp = pll->wp; 45 struct hdmi_wp_data *wp = pll->wp;
105 u16 r = 0; 46 int r;
47
48 r = pm_runtime_get_sync(&pll->pdev->dev);
49 WARN_ON(r < 0);
106 50
107 dss_ctrl_pll_enable(DSS_PLL_HDMI, true); 51 dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
108 52
@@ -117,10 +61,14 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
117{ 61{
118 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); 62 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
119 struct hdmi_wp_data *wp = pll->wp; 63 struct hdmi_wp_data *wp = pll->wp;
64 int r;
120 65
121 hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF); 66 hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
122 67
123 dss_ctrl_pll_enable(DSS_PLL_HDMI, false); 68 dss_ctrl_pll_enable(DSS_PLL_HDMI, false);
69
70 r = pm_runtime_put_sync(&pll->pdev->dev);
71 WARN_ON(r < 0 && r != -ENOSYS);
124} 72}
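/*
 * Illustrative note, not part of the patch: enable/disable now bracket
 * PLL use with runtime PM on the HDMI platform device, via the new
 * pll->pdev back pointer set up in hdmi_pll_init() below --
 * pm_runtime_get_sync() before touching the PLL, pm_runtime_put_sync()
 * once it is fully off.
 */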
125 73
126static const struct dss_pll_ops dsi_pll_ops = { 74static const struct dss_pll_ops dsi_pll_ops = {
@@ -130,6 +78,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
130}; 78};
131 79
132static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { 80static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
81 .type = DSS_PLL_TYPE_B,
82
133 .n_max = 255, 83 .n_max = 255,
134 .m_min = 20, 84 .m_min = 20,
135 .m_max = 4095, 85 .m_max = 4095,
@@ -153,6 +103,8 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
153}; 103};
154 104
155static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = { 105static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
106 .type = DSS_PLL_TYPE_B,
107
156 .n_max = 255, 108 .n_max = 255,
157 .m_min = 20, 109 .m_min = 20,
158 .m_max = 2045, 110 .m_max = 2045,
@@ -224,6 +176,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
224 int r; 176 int r;
225 struct resource *res; 177 struct resource *res;
226 178
179 pll->pdev = pdev;
227 pll->wp = wp; 180 pll->wp = wp;
228 181
229 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); 182 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 13442b9052d1..203694a52d18 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -14,8 +14,9 @@
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <video/omapdss.h> 17#include <linux/seq_file.h>
18 18
19#include "omapdss.h"
19#include "dss.h" 20#include "dss.h"
20#include "hdmi.h" 21#include "hdmi.h"
21 22
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index d7e7c909bbc2..6eaf1adbd606 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -18,7 +18,872 @@
18#ifndef __OMAP_DRM_DSS_H 18#ifndef __OMAP_DRM_DSS_H
19#define __OMAP_DRM_DSS_H 19#define __OMAP_DRM_DSS_H
20 20
21#include <video/omapdss.h> 21#include <linux/list.h>
22#include <linux/kobject.h>
23#include <linux/device.h>
24#include <linux/interrupt.h>
25#include <video/videomode.h>
26#include <linux/platform_data/omapdss.h>
27#include <uapi/drm/drm_mode.h>
28
29#define DISPC_IRQ_FRAMEDONE (1 << 0)
30#define DISPC_IRQ_VSYNC (1 << 1)
31#define DISPC_IRQ_EVSYNC_EVEN (1 << 2)
32#define DISPC_IRQ_EVSYNC_ODD (1 << 3)
33#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4)
34#define DISPC_IRQ_PROG_LINE_NUM (1 << 5)
35#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6)
36#define DISPC_IRQ_GFX_END_WIN (1 << 7)
37#define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8)
38#define DISPC_IRQ_OCP_ERR (1 << 9)
39#define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10)
40#define DISPC_IRQ_VID1_END_WIN (1 << 11)
41#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12)
42#define DISPC_IRQ_VID2_END_WIN (1 << 13)
43#define DISPC_IRQ_SYNC_LOST (1 << 14)
44#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15)
45#define DISPC_IRQ_WAKEUP (1 << 16)
46#define DISPC_IRQ_SYNC_LOST2 (1 << 17)
47#define DISPC_IRQ_VSYNC2 (1 << 18)
48#define DISPC_IRQ_VID3_END_WIN (1 << 19)
49#define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20)
50#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21)
51#define DISPC_IRQ_FRAMEDONE2 (1 << 22)
52#define DISPC_IRQ_FRAMEDONEWB (1 << 23)
53#define DISPC_IRQ_FRAMEDONETV (1 << 24)
54#define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25)
55#define DISPC_IRQ_WBUNCOMPLETEERROR (1 << 26)
56#define DISPC_IRQ_SYNC_LOST3 (1 << 27)
57#define DISPC_IRQ_VSYNC3 (1 << 28)
58#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29)
59#define DISPC_IRQ_FRAMEDONE3 (1 << 30)
60
61struct omap_dss_device;
62struct omap_overlay_manager;
63struct dss_lcd_mgr_config;
64struct snd_aes_iec958;
65struct snd_cea_861_aud_if;
66struct hdmi_avi_infoframe;
67
68enum omap_display_type {
69 OMAP_DISPLAY_TYPE_NONE = 0,
70 OMAP_DISPLAY_TYPE_DPI = 1 << 0,
71 OMAP_DISPLAY_TYPE_DBI = 1 << 1,
72 OMAP_DISPLAY_TYPE_SDI = 1 << 2,
73 OMAP_DISPLAY_TYPE_DSI = 1 << 3,
74 OMAP_DISPLAY_TYPE_VENC = 1 << 4,
75 OMAP_DISPLAY_TYPE_HDMI = 1 << 5,
76 OMAP_DISPLAY_TYPE_DVI = 1 << 6,
77};
78
79enum omap_plane {
80 OMAP_DSS_GFX = 0,
81 OMAP_DSS_VIDEO1 = 1,
82 OMAP_DSS_VIDEO2 = 2,
83 OMAP_DSS_VIDEO3 = 3,
84 OMAP_DSS_WB = 4,
85};
86
87enum omap_channel {
88 OMAP_DSS_CHANNEL_LCD = 0,
89 OMAP_DSS_CHANNEL_DIGIT = 1,
90 OMAP_DSS_CHANNEL_LCD2 = 2,
91 OMAP_DSS_CHANNEL_LCD3 = 3,
92 OMAP_DSS_CHANNEL_WB = 4,
93};
94
95enum omap_color_mode {
96 OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */
97 OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */
98 OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */
99 OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */
100 OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */
101 OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */
102 OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */
103 OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */
104 OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */
105 OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */
106 OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */
107 OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
108 OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
109 OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
110 OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */
111 OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */
112 OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16 - 4444 */
113 OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */
114 OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */
115};
116
117enum omap_dss_load_mode {
118 OMAP_DSS_LOAD_CLUT_AND_FRAME = 0,
119 OMAP_DSS_LOAD_CLUT_ONLY = 1,
120 OMAP_DSS_LOAD_FRAME_ONLY = 2,
121 OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3,
122};
123
124enum omap_dss_trans_key_type {
125 OMAP_DSS_COLOR_KEY_GFX_DST = 0,
126 OMAP_DSS_COLOR_KEY_VID_SRC = 1,
127};
128
129enum omap_rfbi_te_mode {
130 OMAP_DSS_RFBI_TE_MODE_1 = 1,
131 OMAP_DSS_RFBI_TE_MODE_2 = 2,
132};
133
134enum omap_dss_signal_level {
135 OMAPDSS_SIG_ACTIVE_LOW,
136 OMAPDSS_SIG_ACTIVE_HIGH,
137};
138
139enum omap_dss_signal_edge {
140 OMAPDSS_DRIVE_SIG_FALLING_EDGE,
141 OMAPDSS_DRIVE_SIG_RISING_EDGE,
142};
143
144enum omap_dss_venc_type {
145 OMAP_DSS_VENC_TYPE_COMPOSITE,
146 OMAP_DSS_VENC_TYPE_SVIDEO,
147};
148
149enum omap_dss_dsi_pixel_format {
150 OMAP_DSS_DSI_FMT_RGB888,
151 OMAP_DSS_DSI_FMT_RGB666,
152 OMAP_DSS_DSI_FMT_RGB666_PACKED,
153 OMAP_DSS_DSI_FMT_RGB565,
154};
155
156enum omap_dss_dsi_mode {
157 OMAP_DSS_DSI_CMD_MODE = 0,
158 OMAP_DSS_DSI_VIDEO_MODE,
159};
160
161enum omap_display_caps {
162 OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0,
163 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1,
164};
165
166enum omap_dss_display_state {
167 OMAP_DSS_DISPLAY_DISABLED = 0,
168 OMAP_DSS_DISPLAY_ACTIVE,
169};
170
171enum omap_dss_rotation_type {
172 OMAP_DSS_ROT_DMA = 1 << 0,
173 OMAP_DSS_ROT_VRFB = 1 << 1,
174 OMAP_DSS_ROT_TILER = 1 << 2,
175};
176
177/* clockwise rotation angle */
178enum omap_dss_rotation_angle {
179 OMAP_DSS_ROT_0 = 0,
180 OMAP_DSS_ROT_90 = 1,
181 OMAP_DSS_ROT_180 = 2,
182 OMAP_DSS_ROT_270 = 3,
183};
184
185enum omap_overlay_caps {
186 OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
187 OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1,
188 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2,
189 OMAP_DSS_OVL_CAP_ZORDER = 1 << 3,
190 OMAP_DSS_OVL_CAP_POS = 1 << 4,
191 OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
192};
193
194enum omap_overlay_manager_caps {
195 OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
196};
197
198enum omap_dss_clk_source {
199 OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
200 * OMAP4: DSS_FCLK */
201 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
202 * OMAP4: PLL1_CLK1 */
203 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
204 * OMAP4: PLL1_CLK2 */
205 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
206 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
207};
208
209enum omap_hdmi_flags {
210 OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
211};
212
213enum omap_dss_output_id {
214 OMAP_DSS_OUTPUT_DPI = 1 << 0,
215 OMAP_DSS_OUTPUT_DBI = 1 << 1,
216 OMAP_DSS_OUTPUT_SDI = 1 << 2,
217 OMAP_DSS_OUTPUT_DSI1 = 1 << 3,
218 OMAP_DSS_OUTPUT_DSI2 = 1 << 4,
219 OMAP_DSS_OUTPUT_VENC = 1 << 5,
220 OMAP_DSS_OUTPUT_HDMI = 1 << 6,
221};
222
223/* RFBI */
224
225struct rfbi_timings {
226 int cs_on_time;
227 int cs_off_time;
228 int we_on_time;
229 int we_off_time;
230 int re_on_time;
231 int re_off_time;
232 int we_cycle_time;
233 int re_cycle_time;
234 int cs_pulse_width;
235 int access_time;
236
237 int clk_div;
238
239 u32 tim[5]; /* set by rfbi_convert_timings() */
240
241 int converted;
242};
243
244/* DSI */
245
246enum omap_dss_dsi_trans_mode {
247 /* Sync Pulses: both sync start and end packets sent */
248 OMAP_DSS_DSI_PULSE_MODE,
249 /* Sync Events: only sync start packets sent */
250 OMAP_DSS_DSI_EVENT_MODE,
251 /* Burst: only sync start packets sent, pixels are time compressed */
252 OMAP_DSS_DSI_BURST_MODE,
253};
254
255struct omap_dss_dsi_videomode_timings {
256 unsigned long hsclk;
257
258 unsigned ndl;
259 unsigned bitspp;
260
261 /* pixels */
262 u16 hact;
263 /* lines */
264 u16 vact;
265
266 /* DSI video mode blanking data */
267 /* Unit: byte clock cycles */
268 u16 hss;
269 u16 hsa;
270 u16 hse;
271 u16 hfp;
272 u16 hbp;
273 /* Unit: line clocks */
274 u16 vsa;
275 u16 vfp;
276 u16 vbp;
277
278 /* DSI blanking modes */
279 int blanking_mode;
280 int hsa_blanking_mode;
281 int hbp_blanking_mode;
282 int hfp_blanking_mode;
283
284 enum omap_dss_dsi_trans_mode trans_mode;
285
286 bool ddr_clk_always_on;
287 int window_sync;
288};
289
290struct omap_dss_dsi_config {
291 enum omap_dss_dsi_mode mode;
292 enum omap_dss_dsi_pixel_format pixel_format;
293 const struct omap_video_timings *timings;
294
295 unsigned long hs_clk_min, hs_clk_max;
296 unsigned long lp_clk_min, lp_clk_max;
297
298 bool ddr_clk_always_on;
299 enum omap_dss_dsi_trans_mode trans_mode;
300};
301
302struct omap_video_timings {
303 /* Unit: pixels */
304 u16 x_res;
305 /* Unit: pixels */
306 u16 y_res;
307 /* Unit: Hz */
308 u32 pixelclock;
309 /* Unit: pixel clocks */
310 u16 hsw; /* Horizontal synchronization pulse width */
311 /* Unit: pixel clocks */
312 u16 hfp; /* Horizontal front porch */
313 /* Unit: pixel clocks */
314 u16 hbp; /* Horizontal back porch */
315 /* Unit: line clocks */
316 u16 vsw; /* Vertical synchronization pulse width */
317 /* Unit: line clocks */
318 u16 vfp; /* Vertical front porch */
319 /* Unit: line clocks */
320 u16 vbp; /* Vertical back porch */
321
322 /* Vsync logic level */
323 enum omap_dss_signal_level vsync_level;
324 /* Hsync logic level */
325 enum omap_dss_signal_level hsync_level;
326 /* Interlaced or Progressive timings */
327 bool interlace;
328 /* Pixel clock edge to drive LCD data */
329 enum omap_dss_signal_edge data_pclk_edge;
330 /* Data enable logic level */
331 enum omap_dss_signal_level de_level;
332 /* Pixel clock edges to drive HSYNC and VSYNC signals */
333 enum omap_dss_signal_edge sync_pclk_edge;
334
335 bool double_pixel;
336};
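/*
 * Minimal sketch with hypothetical values, not part of the patch:
 * filling omap_video_timings for a CEA 1080p60-style mode using the
 * fields above:
 *
 *	struct omap_video_timings t = {
 *		.x_res = 1920, .y_res = 1080,
 *		.pixelclock = 148500000,
 *		.hsw = 44, .hfp = 88, .hbp = 148,
 *		.vsw = 5, .vfp = 4, .vbp = 36,
 *		.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
 *		.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
 *		.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
 *		.de_level = OMAPDSS_SIG_ACTIVE_HIGH,
 *		.sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
 *	};
 */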
337
338/* Hardcoded timings for tv modes. Venc only uses these to
339 * identify the mode, and does not actually use the configs
340 * itself. However, the configs should be something that
341 * a normal monitor can also show */
342extern const struct omap_video_timings omap_dss_pal_timings;
343extern const struct omap_video_timings omap_dss_ntsc_timings;
344
345struct omap_dss_cpr_coefs {
346 s16 rr, rg, rb;
347 s16 gr, gg, gb;
348 s16 br, bg, bb;
349};
350
351struct omap_overlay_info {
352 dma_addr_t paddr;
353 dma_addr_t p_uv_addr; /* for NV12 format */
354 u16 screen_width;
355 u16 width;
356 u16 height;
357 enum omap_color_mode color_mode;
358 u8 rotation;
359 enum omap_dss_rotation_type rotation_type;
360 bool mirror;
361
362 u16 pos_x;
363 u16 pos_y;
364 u16 out_width; /* if 0, out_width == width */
365 u16 out_height; /* if 0, out_height == height */
366 u8 global_alpha;
367 u8 pre_mult_alpha;
368 u8 zorder;
369};
370
371struct omap_overlay {
372 struct kobject kobj;
373 struct list_head list;
374
375 /* static fields */
376 const char *name;
377 enum omap_plane id;
378 enum omap_color_mode supported_modes;
379 enum omap_overlay_caps caps;
380
381 /* dynamic fields */
382 struct omap_overlay_manager *manager;
383
384 /*
385 * The following functions do not block:
386 *
387 * is_enabled
388 * set_overlay_info
389 * get_overlay_info
390 *
391 * The rest of the functions may block and cannot be called from
392 * interrupt context
393 */
394
395 int (*enable)(struct omap_overlay *ovl);
396 int (*disable)(struct omap_overlay *ovl);
397 bool (*is_enabled)(struct omap_overlay *ovl);
398
399 int (*set_manager)(struct omap_overlay *ovl,
400 struct omap_overlay_manager *mgr);
401 int (*unset_manager)(struct omap_overlay *ovl);
402
403 int (*set_overlay_info)(struct omap_overlay *ovl,
404 struct omap_overlay_info *info);
405 void (*get_overlay_info)(struct omap_overlay *ovl,
406 struct omap_overlay_info *info);
407
408 int (*wait_for_go)(struct omap_overlay *ovl);
409
410 struct omap_dss_device *(*get_device)(struct omap_overlay *ovl);
411};
412
413struct omap_overlay_manager_info {
414 u32 default_color;
415
416 enum omap_dss_trans_key_type trans_key_type;
417 u32 trans_key;
418 bool trans_enabled;
419
420 bool partial_alpha_enabled;
421
422 bool cpr_enable;
423 struct omap_dss_cpr_coefs cpr_coefs;
424};
425
426struct omap_overlay_manager {
427 struct kobject kobj;
428
429 /* static fields */
430 const char *name;
431 enum omap_channel id;
432 enum omap_overlay_manager_caps caps;
433 struct list_head overlays;
434 enum omap_display_type supported_displays;
435 enum omap_dss_output_id supported_outputs;
436
437 /* dynamic fields */
438 struct omap_dss_device *output;
439
440 /*
441 * The following functions do not block:
442 *
443 * set_manager_info
444 * get_manager_info
445 * apply
446 *
447 * The rest of the functions may block and cannot be called from
448 * interrupt context
449 */
450
451 int (*set_output)(struct omap_overlay_manager *mgr,
452 struct omap_dss_device *output);
453 int (*unset_output)(struct omap_overlay_manager *mgr);
454
455 int (*set_manager_info)(struct omap_overlay_manager *mgr,
456 struct omap_overlay_manager_info *info);
457 void (*get_manager_info)(struct omap_overlay_manager *mgr,
458 struct omap_overlay_manager_info *info);
459
460 int (*apply)(struct omap_overlay_manager *mgr);
461 int (*wait_for_go)(struct omap_overlay_manager *mgr);
462 int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
463
464 struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
465};
466
467/* 22 pins means 1 clk lane and 10 data lanes */
468#define OMAP_DSS_MAX_DSI_PINS 22
469
470struct omap_dsi_pin_config {
471 int num_pins;
472 /*
473 * pin numbers in the following order:
474 * clk+, clk-
475 * data1+, data1-
476 * data2+, data2-
477 * ...
478 */
479 int pins[OMAP_DSS_MAX_DSI_PINS];
480};
481
482struct omap_dss_writeback_info {
483 u32 paddr;
484 u32 p_uv_addr;
485 u16 buf_width;
486 u16 width;
487 u16 height;
488 enum omap_color_mode color_mode;
489 u8 rotation;
490 enum omap_dss_rotation_type rotation_type;
491 bool mirror;
492 u8 pre_mult_alpha;
493};
494
495struct omapdss_dpi_ops {
496 int (*connect)(struct omap_dss_device *dssdev,
497 struct omap_dss_device *dst);
498 void (*disconnect)(struct omap_dss_device *dssdev,
499 struct omap_dss_device *dst);
500
501 int (*enable)(struct omap_dss_device *dssdev);
502 void (*disable)(struct omap_dss_device *dssdev);
503
504 int (*check_timings)(struct omap_dss_device *dssdev,
505 struct omap_video_timings *timings);
506 void (*set_timings)(struct omap_dss_device *dssdev,
507 struct omap_video_timings *timings);
508 void (*get_timings)(struct omap_dss_device *dssdev,
509 struct omap_video_timings *timings);
510
511 void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
512};
513
514struct omapdss_sdi_ops {
515 int (*connect)(struct omap_dss_device *dssdev,
516 struct omap_dss_device *dst);
517 void (*disconnect)(struct omap_dss_device *dssdev,
518 struct omap_dss_device *dst);
519
520 int (*enable)(struct omap_dss_device *dssdev);
521 void (*disable)(struct omap_dss_device *dssdev);
522
523 int (*check_timings)(struct omap_dss_device *dssdev,
524 struct omap_video_timings *timings);
525 void (*set_timings)(struct omap_dss_device *dssdev,
526 struct omap_video_timings *timings);
527 void (*get_timings)(struct omap_dss_device *dssdev,
528 struct omap_video_timings *timings);
529
530 void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
531};
532
533struct omapdss_dvi_ops {
534 int (*connect)(struct omap_dss_device *dssdev,
535 struct omap_dss_device *dst);
536 void (*disconnect)(struct omap_dss_device *dssdev,
537 struct omap_dss_device *dst);
538
539 int (*enable)(struct omap_dss_device *dssdev);
540 void (*disable)(struct omap_dss_device *dssdev);
541
542 int (*check_timings)(struct omap_dss_device *dssdev,
543 struct omap_video_timings *timings);
544 void (*set_timings)(struct omap_dss_device *dssdev,
545 struct omap_video_timings *timings);
546 void (*get_timings)(struct omap_dss_device *dssdev,
547 struct omap_video_timings *timings);
548};
549
550struct omapdss_atv_ops {
551 int (*connect)(struct omap_dss_device *dssdev,
552 struct omap_dss_device *dst);
553 void (*disconnect)(struct omap_dss_device *dssdev,
554 struct omap_dss_device *dst);
555
556 int (*enable)(struct omap_dss_device *dssdev);
557 void (*disable)(struct omap_dss_device *dssdev);
558
559 int (*check_timings)(struct omap_dss_device *dssdev,
560 struct omap_video_timings *timings);
561 void (*set_timings)(struct omap_dss_device *dssdev,
562 struct omap_video_timings *timings);
563 void (*get_timings)(struct omap_dss_device *dssdev,
564 struct omap_video_timings *timings);
565
566 void (*set_type)(struct omap_dss_device *dssdev,
567 enum omap_dss_venc_type type);
568 void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
569 bool invert_polarity);
570
571 int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
572 u32 (*get_wss)(struct omap_dss_device *dssdev);
573};
574
575struct omapdss_hdmi_ops {
576 int (*connect)(struct omap_dss_device *dssdev,
577 struct omap_dss_device *dst);
578 void (*disconnect)(struct omap_dss_device *dssdev,
579 struct omap_dss_device *dst);
580
581 int (*enable)(struct omap_dss_device *dssdev);
582 void (*disable)(struct omap_dss_device *dssdev);
583
584 int (*check_timings)(struct omap_dss_device *dssdev,
585 struct omap_video_timings *timings);
586 void (*set_timings)(struct omap_dss_device *dssdev,
587 struct omap_video_timings *timings);
588 void (*get_timings)(struct omap_dss_device *dssdev,
589 struct omap_video_timings *timings);
590
591 int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
592 bool (*detect)(struct omap_dss_device *dssdev);
593
594 int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
595 int (*set_infoframe)(struct omap_dss_device *dssdev,
596 const struct hdmi_avi_infoframe *avi);
597};
598
599struct omapdss_dsi_ops {
600 int (*connect)(struct omap_dss_device *dssdev,
601 struct omap_dss_device *dst);
602 void (*disconnect)(struct omap_dss_device *dssdev,
603 struct omap_dss_device *dst);
604
605 int (*enable)(struct omap_dss_device *dssdev);
606 void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
607 bool enter_ulps);
608
609 /* bus configuration */
610 int (*set_config)(struct omap_dss_device *dssdev,
611 const struct omap_dss_dsi_config *cfg);
612 int (*configure_pins)(struct omap_dss_device *dssdev,
613 const struct omap_dsi_pin_config *pin_cfg);
614
615 void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
616 bool enable);
617 int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
618
619 int (*update)(struct omap_dss_device *dssdev, int channel,
620 void (*callback)(int, void *), void *data);
621
622 void (*bus_lock)(struct omap_dss_device *dssdev);
623 void (*bus_unlock)(struct omap_dss_device *dssdev);
624
625 int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
626 void (*disable_video_output)(struct omap_dss_device *dssdev,
627 int channel);
628
629 int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
630 int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
631 int vc_id);
632 void (*release_vc)(struct omap_dss_device *dssdev, int channel);
633
634 /* data transfer */
635 int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
636 u8 *data, int len);
637 int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
638 u8 *data, int len);
639 int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
640 u8 *data, int len);
641
642 int (*gen_write)(struct omap_dss_device *dssdev, int channel,
643 u8 *data, int len);
644 int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
645 u8 *data, int len);
646 int (*gen_read)(struct omap_dss_device *dssdev, int channel,
647 u8 *reqdata, int reqlen,
648 u8 *data, int len);
649
650 int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
651
652 int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
653 int channel, u16 plen);
654};
655
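As an illustrative aside (not part of this patch): a DSI panel driver drives this table through dssdev->ops.dsi, bracketing command-mode traffic with bus_lock()/bus_unlock(). A minimal sketch, assuming dssdev and channel come from earlier connect() and request_vc() calls; example_panel_display_on is an invented name:

/* Sketch: send a single DCS command over an already-acquired virtual
 * channel. The bus lock serializes command-mode traffic against other
 * users of the DSI link. */
static int example_panel_display_on(struct omap_dss_device *dssdev,
                                    int channel)
{
        const struct omapdss_dsi_ops *dsi = dssdev->ops.dsi;
        u8 cmd = 0x29; /* MIPI DCS set_display_on */
        int r;

        dsi->bus_lock(dssdev);
        r = dsi->dcs_write(dssdev, channel, &cmd, 1);
        dsi->bus_unlock(dssdev);

        return r;
}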
656struct omap_dss_device {
657 struct kobject kobj;
658 struct device *dev;
659
660 struct module *owner;
661
662 struct list_head panel_list;
663
664 /* alias in the form of "display%d" */
665 char alias[16];
666
667 enum omap_display_type type;
668 enum omap_display_type output_type;
669
670 union {
671 struct {
672 u8 data_lines;
673 } dpi;
674
675 struct {
676 u8 channel;
677 u8 data_lines;
678 } rfbi;
679
680 struct {
681 u8 datapairs;
682 } sdi;
683
684 struct {
685 int module;
686 } dsi;
687
688 struct {
689 enum omap_dss_venc_type type;
690 bool invert_polarity;
691 } venc;
692 } phy;
693
694 struct {
695 struct omap_video_timings timings;
696
697 enum omap_dss_dsi_pixel_format dsi_pix_fmt;
698 enum omap_dss_dsi_mode dsi_mode;
699 } panel;
700
701 struct {
702 u8 pixel_size;
703 struct rfbi_timings rfbi_timings;
704 } ctrl;
705
706 const char *name;
707
708 /* used to match device to driver */
709 const char *driver_name;
710
711 void *data;
712
713 struct omap_dss_driver *driver;
714
715 union {
716 const struct omapdss_dpi_ops *dpi;
717 const struct omapdss_sdi_ops *sdi;
718 const struct omapdss_dvi_ops *dvi;
719 const struct omapdss_hdmi_ops *hdmi;
720 const struct omapdss_atv_ops *atv;
721 const struct omapdss_dsi_ops *dsi;
722 } ops;
723
724 /* helper variable for driver suspend/resume */
725 bool activate_after_resume;
726
727 enum omap_display_caps caps;
728
729 struct omap_dss_device *src;
730
731 enum omap_dss_display_state state;
732
733 /* OMAP DSS output specific fields */
734
735 struct list_head list;
736
737 /* DISPC channel for this output */
738 enum omap_channel dispc_channel;
739 bool dispc_channel_connected;
740
741 /* output instance */
742 enum omap_dss_output_id id;
743
744 /* the port number in the DT node */
745 int port_num;
746
747 /* dynamic fields */
748 struct omap_overlay_manager *manager;
749
750 struct omap_dss_device *dst;
751};
752
753struct omap_dss_driver {
754 int (*probe)(struct omap_dss_device *);
755 void (*remove)(struct omap_dss_device *);
756
757 int (*connect)(struct omap_dss_device *dssdev);
758 void (*disconnect)(struct omap_dss_device *dssdev);
759
760 int (*enable)(struct omap_dss_device *display);
761 void (*disable)(struct omap_dss_device *display);
762 int (*run_test)(struct omap_dss_device *display, int test);
763
764 int (*update)(struct omap_dss_device *dssdev,
765 u16 x, u16 y, u16 w, u16 h);
766 int (*sync)(struct omap_dss_device *dssdev);
767
768 int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
769 int (*get_te)(struct omap_dss_device *dssdev);
770
771 u8 (*get_rotate)(struct omap_dss_device *dssdev);
772 int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
773
774 bool (*get_mirror)(struct omap_dss_device *dssdev);
775 int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
776
777 int (*memory_read)(struct omap_dss_device *dssdev,
778 void *buf, size_t size,
779 u16 x, u16 y, u16 w, u16 h);
780
781 void (*get_resolution)(struct omap_dss_device *dssdev,
782 u16 *xres, u16 *yres);
783 void (*get_dimensions)(struct omap_dss_device *dssdev,
784 u32 *width, u32 *height);
785 int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
786
787 int (*check_timings)(struct omap_dss_device *dssdev,
788 struct omap_video_timings *timings);
789 void (*set_timings)(struct omap_dss_device *dssdev,
790 struct omap_video_timings *timings);
791 void (*get_timings)(struct omap_dss_device *dssdev,
792 struct omap_video_timings *timings);
793
794 int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
795 u32 (*get_wss)(struct omap_dss_device *dssdev);
796
797 int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
798 bool (*detect)(struct omap_dss_device *dssdev);
799
800 int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
801 int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
802 const struct hdmi_avi_infoframe *avi);
803};
804
805enum omapdss_version omapdss_get_version(void);
806bool omapdss_is_initialized(void);
807
808int omap_dss_register_driver(struct omap_dss_driver *);
809void omap_dss_unregister_driver(struct omap_dss_driver *);
810
811int omapdss_register_display(struct omap_dss_device *dssdev);
812void omapdss_unregister_display(struct omap_dss_device *dssdev);
813
814struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
815void omap_dss_put_device(struct omap_dss_device *dssdev);
816#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
817struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
818struct omap_dss_device *omap_dss_find_device(void *data,
819 int (*match)(struct omap_dss_device *dssdev, void *data));
820const char *omapdss_get_default_display_name(void);
821
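As an illustrative aside (not part of this patch): for_each_dss_dev() hides a reference cycle, since omap_dss_get_next_device() puts the device it was handed and gets the one it returns. A sketch of the usual pattern under those assumed get/put semantics; example_find_enabled_display is an invented name, and omapdss_device_is_enabled() is defined further down in this header:

/* Sketch: starting from NULL walks the whole registered-display list.
 * Returning from inside the loop keeps a reference on the returned
 * device, which the caller must drop with omap_dss_put_device().
 * Letting the loop run to completion leaves nothing to release. */
static struct omap_dss_device *example_find_enabled_display(void)
{
        struct omap_dss_device *dssdev = NULL;

        for_each_dss_dev(dssdev) {
                if (omapdss_device_is_enabled(dssdev))
                        return dssdev;
        }

        return NULL;
}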
822void videomode_to_omap_video_timings(const struct videomode *vm,
823 struct omap_video_timings *ovt);
824void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
825 struct videomode *vm);
826
827int dss_feat_get_num_mgrs(void);
828int dss_feat_get_num_ovls(void);
829enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
830
831
832
833int omap_dss_get_num_overlay_managers(void);
834struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
835
836int omap_dss_get_num_overlays(void);
837struct omap_overlay *omap_dss_get_overlay(int num);
838
839int omapdss_register_output(struct omap_dss_device *output);
840void omapdss_unregister_output(struct omap_dss_device *output);
841struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
842struct omap_dss_device *omap_dss_find_output(const char *name);
843struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
844int omapdss_output_set_device(struct omap_dss_device *out,
845 struct omap_dss_device *dssdev);
846int omapdss_output_unset_device(struct omap_dss_device *out);
847
848struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
849struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
850
851void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
852 u16 *xres, u16 *yres);
853int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
854void omapdss_default_get_timings(struct omap_dss_device *dssdev,
855 struct omap_video_timings *timings);
856
857typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
858int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
859int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
860
861int omapdss_compat_init(void);
862void omapdss_compat_uninit(void);
863
864static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
865{
866 return dssdev->src;
867}
868
869static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
870{
871 return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
872}
873
874struct device_node *
875omapdss_of_get_next_port(const struct device_node *parent,
876 struct device_node *prev);
877
878struct device_node *
879omapdss_of_get_next_endpoint(const struct device_node *parent,
880 struct device_node *prev);
881
882struct device_node *
883omapdss_of_get_first_endpoint(const struct device_node *parent);
884
885struct omap_dss_device *
886omapdss_of_find_source_for_first_ep(struct device_node *node);
22 887
23u32 dispc_read_irqstatus(void); 888u32 dispc_read_irqstatus(void);
24void dispc_clear_irqstatus(u32 mask); 889void dispc_clear_irqstatus(u32 mask);
@@ -44,6 +909,10 @@ void dispc_mgr_set_timings(enum omap_channel channel,
44 const struct omap_video_timings *timings); 909 const struct omap_video_timings *timings);
45void dispc_mgr_setup(enum omap_channel channel, 910void dispc_mgr_setup(enum omap_channel channel,
46 const struct omap_overlay_manager_info *info); 911 const struct omap_overlay_manager_info *info);
912u32 dispc_mgr_gamma_size(enum omap_channel channel);
913void dispc_mgr_set_gamma(enum omap_channel channel,
914 const struct drm_color_lut *lut,
915 unsigned int length);
47 916
48int dispc_ovl_enable(enum omap_plane plane, bool enable); 917int dispc_ovl_enable(enum omap_plane plane, bool enable);
49bool dispc_ovl_enabled(enum omap_plane plane); 918bool dispc_ovl_enabled(enum omap_plane plane);
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 829232ad8c81..24f859488201 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -21,8 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h> 22#include <linux/of.h>
23 23
24#include <video/omapdss.h> 24#include "omapdss.h"
25
26#include "dss.h" 25#include "dss.h"
27 26
28static LIST_HEAD(output_list); 27static LIST_HEAD(output_list);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index f974ddcd3b6e..0a76c89cdc2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -22,8 +22,7 @@
22#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24 24
25#include <video/omapdss.h> 25#include "omapdss.h"
26
27#include "dss.h" 26#include "dss.h"
28 27
29#define PLL_CONTROL 0x0000 28#define PLL_CONTROL 0x0000
@@ -76,6 +75,59 @@ struct dss_pll *dss_pll_find(const char *name)
76 return NULL; 75 return NULL;
77} 76}
78 77
78struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
79{
80 struct dss_pll *pll;
81
82 switch (src) {
83 default:
84 case DSS_CLK_SRC_FCK:
85 return NULL;
86
87 case DSS_CLK_SRC_HDMI_PLL:
88 return dss_pll_find("hdmi");
89
90 case DSS_CLK_SRC_PLL1_1:
91 case DSS_CLK_SRC_PLL1_2:
92 case DSS_CLK_SRC_PLL1_3:
93 pll = dss_pll_find("dsi0");
94 if (!pll)
95 pll = dss_pll_find("video0");
96 return pll;
97
98 case DSS_CLK_SRC_PLL2_1:
99 case DSS_CLK_SRC_PLL2_2:
100 case DSS_CLK_SRC_PLL2_3:
101 pll = dss_pll_find("dsi1");
102 if (!pll)
103 pll = dss_pll_find("video1");
104 return pll;
105 }
106}
107
108unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
109{
110 switch (src) {
111 case DSS_CLK_SRC_HDMI_PLL:
112 return 0;
113
114 case DSS_CLK_SRC_PLL1_1:
115 case DSS_CLK_SRC_PLL2_1:
116 return 0;
117
118 case DSS_CLK_SRC_PLL1_2:
119 case DSS_CLK_SRC_PLL2_2:
120 return 1;
121
122 case DSS_CLK_SRC_PLL1_3:
123 case DSS_CLK_SRC_PLL2_3:
124 return 2;
125
126 default:
127 return 0;
128 }
129}
130
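As an illustrative aside (not part of this patch): combining the two helpers above resolves which PLL output feeds a given clock source. This sketch assumes the PLL caches its last-committed settings in pll->cinfo, as struct dss_pll in this driver's dss.h does; example_clk_src_rate is an invented name:

/* Sketch: resolve the rate currently produced for a clock source.
 * A NULL PLL means DSS_CLK_SRC_FCK, i.e. the plain functional clock
 * rather than a PLL output. */
static unsigned long example_clk_src_rate(enum dss_clk_source src)
{
        struct dss_pll *pll = dss_pll_find_by_src(src);

        if (!pll)
                return 0; /* not driven by a PLL */

        return pll->cinfo.clkout[dss_pll_get_clkout_idx_for_src(src)];
}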
79int dss_pll_enable(struct dss_pll *pll) 131int dss_pll_enable(struct dss_pll *pll)
80{ 132{
81 int r; 133 int r;
@@ -129,7 +181,7 @@ int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cin
129 return 0; 181 return 0;
130} 182}
131 183
132bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, 184bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
133 unsigned long out_min, unsigned long out_max, 185 unsigned long out_min, unsigned long out_max,
134 dss_hsdiv_calc_func func, void *data) 186 dss_hsdiv_calc_func func, void *data)
135{ 187{
@@ -154,7 +206,11 @@ bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
154 return false; 206 return false;
155} 207}
156 208
157bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, 209/*
210 * clkdco = clkin / n * m * 2
211 * clkoutX = clkdco / mX
212 */
213bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
158 unsigned long pll_min, unsigned long pll_max, 214 unsigned long pll_min, unsigned long pll_max,
159 dss_pll_calc_func func, void *data) 215 dss_pll_calc_func func, void *data)
160{ 216{
@@ -195,6 +251,71 @@ bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
195 return false; 251 return false;
196} 252}
197 253
254/*
255 * This calculates a PLL config that will provide the target_clkout rate
256 * for clkout. Additionally clkdco rate will be the same as clkout rate
257 * when clkout rate is >= min_clkdco.
258 *
259 * clkdco = clkin / n * m + clkin / n * mf / 262144
260 * clkout = clkdco / m2
261 */
262bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
263 unsigned long target_clkout, struct dss_pll_clock_info *cinfo)
264{
265 unsigned long fint, clkdco, clkout;
266 unsigned long target_clkdco;
267 unsigned long min_dco;
268 unsigned n, m, mf, m2, sd;
269 const struct dss_pll_hw *hw = pll->hw;
270
271 DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout);
272
273 /* Fint */
274 n = DIV_ROUND_UP(clkin, hw->fint_max);
275 fint = clkin / n;
276
277 /* adjust m2 so that the clkdco will be high enough */
278 min_dco = roundup(hw->clkdco_min, fint);
279 m2 = DIV_ROUND_UP(min_dco, target_clkout);
280 if (m2 == 0)
281 m2 = 1;
282
283 target_clkdco = target_clkout * m2;
284 m = target_clkdco / fint;
285
286 clkdco = fint * m;
287
288 /* adjust clkdco with fractional mf */
289 if (WARN_ON(target_clkdco - clkdco > fint))
290 mf = 0;
291 else
292 mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
293
294 if (mf > 0)
295 clkdco += (u32)div_u64((u64)mf * fint, 262144);
296
297 clkout = clkdco / m2;
298
299 /* sigma-delta */
300 sd = DIV_ROUND_UP(fint * m, 250000000);
301
302 DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
303 n, m, mf, m2, sd);
304 DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
305
306 cinfo->n = n;
307 cinfo->m = m;
308 cinfo->mf = mf;
309 cinfo->mX[0] = m2;
310 cinfo->sd = sd;
311
312 cinfo->fint = fint;
313 cinfo->clkdco = clkdco;
314 cinfo->clkout[0] = clkout;
315
316 return true;
317}
318
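As an illustrative aside (not part of this patch): the fractional (type B) math above can be replayed in a standalone userspace program. The fint_max and clkdco_min limits below are invented for illustration, not taken from a TRM; with clkin = 20 MHz and a 148.5 MHz target, the computed clkout lands within a few Hz of the target:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* illustrative hardware limits, not real TRM values */
	const unsigned long fint_max = 2500000;
	const unsigned long clkdco_min = 750000000;
	const unsigned long clkin = 20000000;
	const unsigned long target_clkout = 148500000;

	unsigned long n = DIV_ROUND_UP(clkin, fint_max);          /* 8 */
	unsigned long fint = clkin / n;                           /* 2.5 MHz */
	unsigned long min_dco =
		DIV_ROUND_UP(clkdco_min, fint) * fint;            /* 750 MHz */
	unsigned long m2 = DIV_ROUND_UP(min_dco, target_clkout);  /* 6 */
	unsigned long target_clkdco = target_clkout * m2;         /* 891 MHz */
	unsigned long m = target_clkdco / fint;                   /* 356 */
	unsigned long clkdco = fint * m;                          /* 890 MHz */
	uint64_t mf = 262144ull * (target_clkdco - clkdco) / fint;

	/* fractional correction brings clkdco back to ~891 MHz */
	clkdco += (unsigned long)(mf * fint / 262144);

	printf("n=%lu m=%lu mf=%llu m2=%lu clkout=%lu\n",
	       n, m, (unsigned long long)mf, m2, clkdco / m2);
	return 0;
}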
198static int wait_for_bit_change(void __iomem *reg, int bitnum, int value) 319static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
199{ 320{
200 unsigned long timeout; 321 unsigned long timeout;
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index 3796576dfadf..cd53566d75eb 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -38,7 +38,7 @@
38#include <linux/pm_runtime.h> 38#include <linux/pm_runtime.h>
39#include <linux/component.h> 39#include <linux/component.h>
40 40
41#include <video/omapdss.h> 41#include "omapdss.h"
42#include "dss.h" 42#include "dss.h"
43 43
44struct rfbi_reg { u16 idx; }; 44struct rfbi_reg { u16 idx; };
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index cd6d3bfb041d..0a96c321ce62 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -29,7 +29,7 @@
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/component.h> 30#include <linux/component.h>
31 31
32#include <video/omapdss.h> 32#include "omapdss.h"
33#include "dss.h" 33#include "dss.h"
34 34
35static struct { 35static struct {
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 08a2cc778ba9..6eedf2118708 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -37,8 +37,7 @@
37#include <linux/of.h> 37#include <linux/of.h>
38#include <linux/component.h> 38#include <linux/component.h>
39 39
40#include <video/omapdss.h> 40#include "omapdss.h"
41
42#include "dss.h" 41#include "dss.h"
43#include "dss_features.h" 42#include "dss_features.h"
44 43
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index b1ec59e42940..7429de928d4e 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -17,8 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19 19
20#include <video/omapdss.h> 20#include "omapdss.h"
21
22#include "dss.h" 21#include "dss.h"
23#include "dss_features.h" 22#include "dss_features.h"
24 23
@@ -108,6 +107,8 @@ static const struct dss_pll_ops dss_pll_ops = {
108}; 107};
109 108
110static const struct dss_pll_hw dss_dra7_video_pll_hw = { 109static const struct dss_pll_hw dss_dra7_video_pll_hw = {
110 .type = DSS_PLL_TYPE_A,
111
111 .n_max = (1 << 8) - 1, 112 .n_max = (1 << 8) - 1,
112 .m_max = (1 << 12) - 1, 113 .m_max = (1 << 12) - 1,
113 .mX_max = (1 << 5) - 1, 114 .mX_max = (1 << 5) - 1,
@@ -124,6 +125,10 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
124 .mX_lsb[0] = 21, 125 .mX_lsb[0] = 21,
125 .mX_msb[1] = 30, 126 .mX_msb[1] = 30,
126 .mX_lsb[1] = 26, 127 .mX_lsb[1] = 26,
128 .mX_msb[2] = 4,
129 .mX_lsb[2] = 0,
130 .mX_msb[3] = 9,
131 .mX_lsb[3] = 5,
127 132
128 .has_refsel = true, 133 .has_refsel = true,
129}; 134};
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index ce2d67b6a8c7..137fe690a0da 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -32,7 +32,6 @@
32struct omap_connector { 32struct omap_connector {
33 struct drm_connector base; 33 struct drm_connector base;
34 struct omap_dss_device *dssdev; 34 struct omap_dss_device *dssdev;
35 struct drm_encoder *encoder;
36 bool hdmi_mode; 35 bool hdmi_mode;
37}; 36};
38 37
@@ -256,13 +255,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
256 return ret; 255 return ret;
257} 256}
258 257
259struct drm_encoder *omap_connector_attached_encoder(
260 struct drm_connector *connector)
261{
262 struct omap_connector *omap_connector = to_omap_connector(connector);
263 return omap_connector->encoder;
264}
265
266static const struct drm_connector_funcs omap_connector_funcs = { 258static const struct drm_connector_funcs omap_connector_funcs = {
267 .dpms = drm_atomic_helper_connector_dpms, 259 .dpms = drm_atomic_helper_connector_dpms,
268 .reset = drm_atomic_helper_connector_reset, 260 .reset = drm_atomic_helper_connector_reset,
@@ -276,7 +268,6 @@ static const struct drm_connector_funcs omap_connector_funcs = {
276static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { 268static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
277 .get_modes = omap_connector_get_modes, 269 .get_modes = omap_connector_get_modes,
278 .mode_valid = omap_connector_mode_valid, 270 .mode_valid = omap_connector_mode_valid,
279 .best_encoder = omap_connector_attached_encoder,
280}; 271};
281 272
282/* initialize connector */ 273/* initialize connector */
@@ -296,7 +287,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
296 goto fail; 287 goto fail;
297 288
298 omap_connector->dssdev = dssdev; 289 omap_connector->dssdev = dssdev;
299 omap_connector->encoder = encoder;
300 290
301 connector = &omap_connector->base; 291 connector = &omap_connector->base;
302 292
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 075f2bb44867..180f644e861e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -372,6 +372,20 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
372 copy_timings_drm_to_omap(&omap_crtc->timings, mode); 372 copy_timings_drm_to_omap(&omap_crtc->timings, mode);
373} 373}
374 374
375static int omap_crtc_atomic_check(struct drm_crtc *crtc,
376 struct drm_crtc_state *state)
377{
378 if (state->color_mgmt_changed && state->gamma_lut) {
379 uint length = state->gamma_lut->length /
380 sizeof(struct drm_color_lut);
381
382 if (length < 2)
383 return -EINVAL;
384 }
385
386 return 0;
387}
388
375static void omap_crtc_atomic_begin(struct drm_crtc *crtc, 389static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
376 struct drm_crtc_state *old_crtc_state) 390 struct drm_crtc_state *old_crtc_state)
377{ 391{
@@ -384,6 +398,19 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
384 398
385 WARN_ON(omap_crtc->vblank_irq.registered); 399 WARN_ON(omap_crtc->vblank_irq.registered);
386 400
401 if (crtc->state->color_mgmt_changed) {
402 struct drm_color_lut *lut = NULL;
403 uint length = 0;
404
405 if (crtc->state->gamma_lut) {
406 lut = (struct drm_color_lut *)
407 crtc->state->gamma_lut->data;
408 length = crtc->state->gamma_lut->length /
409 sizeof(*lut);
410 }
411 dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
412 }
413
387 if (dispc_mgr_is_enabled(omap_crtc->channel)) { 414 if (dispc_mgr_is_enabled(omap_crtc->channel)) {
388 415
389 DBG("%s: GO", omap_crtc->name); 416 DBG("%s: GO", omap_crtc->name);
@@ -460,6 +500,7 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
460 .set_config = drm_atomic_helper_set_config, 500 .set_config = drm_atomic_helper_set_config,
461 .destroy = omap_crtc_destroy, 501 .destroy = omap_crtc_destroy,
462 .page_flip = drm_atomic_helper_page_flip, 502 .page_flip = drm_atomic_helper_page_flip,
503 .gamma_set = drm_atomic_helper_legacy_gamma_set,
463 .set_property = drm_atomic_helper_crtc_set_property, 504 .set_property = drm_atomic_helper_crtc_set_property,
464 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 505 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
465 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 506 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -471,6 +512,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
471 .mode_set_nofb = omap_crtc_mode_set_nofb, 512 .mode_set_nofb = omap_crtc_mode_set_nofb,
472 .disable = omap_crtc_disable, 513 .disable = omap_crtc_disable,
473 .enable = omap_crtc_enable, 514 .enable = omap_crtc_enable,
515 .atomic_check = omap_crtc_atomic_check,
474 .atomic_begin = omap_crtc_atomic_begin, 516 .atomic_begin = omap_crtc_atomic_begin,
475 .atomic_flush = omap_crtc_atomic_flush, 517 .atomic_flush = omap_crtc_atomic_flush,
476}; 518};
@@ -534,6 +576,20 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
534 576
535 drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs); 577 drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
536 578
579 /* The dispc API adapts to whatever size, but the HW supports a
580 * 256-element gamma table for LCDs and a 1024-element table for
581 * OMAP_DSS_CHANNEL_DIGIT. The X server assumes 256-element gamma
582 * tables, so let's use that. The size of the HW gamma table can be
583 * queried with dispc_mgr_gamma_size(); if it returns 0, the gamma
584 * table is not supported.
585 */
586 if (dispc_mgr_gamma_size(channel)) {
587 uint gamma_lut_size = 256;
588
589 drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
590 drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
591 }
592
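As an illustrative aside (not part of this patch): the legacy path registered above hands dispc_mgr_set_gamma() an array of struct drm_color_lut with 16-bit channels. A sketch of filling an identity ramp of the advertised 256-entry size; example_fill_linear_lut is an invented name, and len must be at least 2, matching the atomic_check added earlier in this patch:

/* Sketch: identity (linear) ramp. Index i maps to i * 0xffff / (len - 1),
 * so entry 0 is black and entry len - 1 is full scale on each channel. */
static void example_fill_linear_lut(struct drm_color_lut *lut,
                                    unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; i++) {
                u16 v = i * 0xffffU / (len - 1);

                lut[i].red = v;
                lut[i].green = v;
                lut[i].blue = v;
        }
}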
537 omap_plane_install_properties(crtc->primary, &crtc->base); 593 omap_plane_install_properties(crtc->primary, &crtc->base);
538 594
539 omap_crtcs[channel] = omap_crtc; 595 omap_crtcs[channel] = omap_crtc;
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 6f5fc14fc015..479bf24050f8 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -17,6 +17,8 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/seq_file.h>
21
20#include <drm/drm_crtc.h> 22#include <drm/drm_crtc.h>
21#include <drm/drm_fb_helper.h> 23#include <drm/drm_fb_helper.h>
22 24
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index de275a5be1db..4ceed7a9762f 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/platform_device.h> /* platform_device() */ 28#include <linux/platform_device.h> /* platform_device() */
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/seq_file.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31#include <linux/time.h> 32#include <linux/time.h>
32#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d86f5479345b..6b97011154bf 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev,
142{ 142{
143 struct omap_drm_private *priv = dev->dev_private; 143 struct omap_drm_private *priv = dev->dev_private;
144 struct omap_atomic_state_commit *commit; 144 struct omap_atomic_state_commit *commit;
145 unsigned int i; 145 struct drm_crtc *crtc;
146 int ret; 146 struct drm_crtc_state *crtc_state;
147 int i, ret;
147 148
148 ret = drm_atomic_helper_prepare_planes(dev, state); 149 ret = drm_atomic_helper_prepare_planes(dev, state);
149 if (ret) 150 if (ret)
@@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev,
163 /* Wait until all affected CRTCs have completed previous commits and 164 /* Wait until all affected CRTCs have completed previous commits and
164 * mark them as pending. 165 * mark them as pending.
165 */ 166 */
166 for (i = 0; i < dev->mode_config.num_crtc; ++i) { 167 for_each_crtc_in_state(state, crtc, crtc_state, i)
167 if (state->crtcs[i]) 168 commit->crtcs |= drm_crtc_mask(crtc);
168 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
169 }
170 169
171 wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit)); 170 wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit));
172 171
@@ -175,7 +174,7 @@ static int omap_atomic_commit(struct drm_device *dev,
175 spin_unlock(&priv->commit.lock); 174 spin_unlock(&priv->commit.lock);
176 175
177 /* Swap the state, this is the point of no return. */ 176 /* Swap the state, this is the point of no return. */
178 drm_atomic_helper_swap_state(dev, state); 177 drm_atomic_helper_swap_state(state, true);
179 178
180 if (nonblock) 179 if (nonblock)
181 schedule_work(&commit->work); 180 schedule_work(&commit->work);
@@ -203,6 +202,8 @@ static int get_connector_type(struct omap_dss_device *dssdev)
203 return DRM_MODE_CONNECTOR_HDMIA; 202 return DRM_MODE_CONNECTOR_HDMIA;
204 case OMAP_DISPLAY_TYPE_DVI: 203 case OMAP_DISPLAY_TYPE_DVI:
205 return DRM_MODE_CONNECTOR_DVID; 204 return DRM_MODE_CONNECTOR_DVID;
205 case OMAP_DISPLAY_TYPE_DSI:
206 return DRM_MODE_CONNECTOR_DSI;
206 default: 207 default:
207 return DRM_MODE_CONNECTOR_Unknown; 208 return DRM_MODE_CONNECTOR_Unknown;
208 } 209 }
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 3f823c368912..dcc30a98b9d4 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -24,7 +24,6 @@
24#include <linux/platform_data/omap_drm.h> 24#include <linux/platform_data/omap_drm.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/wait.h> 26#include <linux/wait.h>
27#include <video/omapdss.h>
28 27
29#include <drm/drmP.h> 28#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h> 29#include <drm/drm_crtc_helper.h>
@@ -183,7 +182,6 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
183 struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); 182 struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
184struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 183struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
185 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 184 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
186struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
187int omap_framebuffer_pin(struct drm_framebuffer *fb); 185int omap_framebuffer_pin(struct drm_framebuffer *fb);
188void omap_framebuffer_unpin(struct drm_framebuffer *fb); 186void omap_framebuffer_unpin(struct drm_framebuffer *fb);
189void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 187void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
@@ -231,7 +229,6 @@ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
231 int x, int y, dma_addr_t *paddr); 229 int x, int y, dma_addr_t *paddr);
232uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj); 230uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
233size_t omap_gem_mmap_size(struct drm_gem_object *obj); 231size_t omap_gem_mmap_size(struct drm_gem_object *obj);
234int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h);
235int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient); 232int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
236 233
237struct dma_buf *omap_gem_prime_export(struct drm_device *dev, 234struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
@@ -239,17 +236,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
239struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, 236struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
240 struct dma_buf *buffer); 237 struct dma_buf *buffer);
241 238
242static inline int align_pitch(int pitch, int width, int bpp)
243{
244 int bytespp = (bpp + 7) / 8;
245 /* in case someone tries to feed us a completely bogus stride: */
246 pitch = max(pitch, width * bytespp);
247 /* PVR needs alignment to 8 pixels.. right now that is the most
248 * restrictive stride requirement..
249 */
250 return roundup(pitch, 8 * bytespp);
251}
252
253/* map crtc to vblank mask */ 239/* map crtc to vblank mask */
254uint32_t pipe2vbl(struct drm_crtc *crtc); 240uint32_t pipe2vbl(struct drm_crtc *crtc);
255struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); 241struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 94ec06d3d737..983c8cf2441c 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -17,6 +17,8 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/seq_file.h>
21
20#include <drm/drm_crtc.h> 22#include <drm/drm_crtc.h>
21#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
22 24
@@ -120,17 +122,9 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
120 kfree(omap_fb); 122 kfree(omap_fb);
121} 123}
122 124
123static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
124 struct drm_file *file_priv, unsigned flags, unsigned color,
125 struct drm_clip_rect *clips, unsigned num_clips)
126{
127 return 0;
128}
129
130static const struct drm_framebuffer_funcs omap_framebuffer_funcs = { 125static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
131 .create_handle = omap_framebuffer_create_handle, 126 .create_handle = omap_framebuffer_create_handle,
132 .destroy = omap_framebuffer_destroy, 127 .destroy = omap_framebuffer_destroy,
133 .dirty = omap_framebuffer_dirty,
134}; 128};
135 129
136static uint32_t get_linear_addr(struct plane *plane, 130static uint32_t get_linear_addr(struct plane *plane,
@@ -318,14 +312,6 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
318 mutex_unlock(&omap_fb->lock); 312 mutex_unlock(&omap_fb->lock);
319} 313}
320 314
321struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
322{
323 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
324 if (p >= drm_format_num_planes(fb->pixel_format))
325 return NULL;
326 return omap_fb->planes[p].bo;
327}
328
329/* iterate thru all the connectors, returning ones that are attached 315/* iterate thru all the connectors, returning ones that are attached
330 * to the same fb.. 316 * to the same fb..
331 */ 317 */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 89da41ac64d2..31dfa0893416 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -125,9 +125,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
125 mode_cmd.width = sizes->surface_width; 125 mode_cmd.width = sizes->surface_width;
126 mode_cmd.height = sizes->surface_height; 126 mode_cmd.height = sizes->surface_height;
127 127
128 mode_cmd.pitches[0] = align_pitch( 128 mode_cmd.pitches[0] =
129 mode_cmd.width * ((sizes->surface_bpp + 7) / 8), 129 DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);
130 mode_cmd.width, sizes->surface_bpp);
131 130
132 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; 131 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
133 if (fbdev->ywrap_enabled) { 132 if (fbdev->ywrap_enabled) {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index b97afc281778..9b3f565fd8d7 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -17,6 +17,7 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/seq_file.h>
20#include <linux/shmem_fs.h> 21#include <linux/shmem_fs.h>
21#include <linux/spinlock.h> 22#include <linux/spinlock.h>
22#include <linux/pfn_t.h> 23#include <linux/pfn_t.h>
@@ -382,18 +383,6 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
382 return size; 383 return size;
383} 384}
384 385
385/* get tiled size, returns -EINVAL if not tiled buffer */
386int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
387{
388 struct omap_gem_object *omap_obj = to_omap_bo(obj);
389 if (omap_obj->flags & OMAP_BO_TILED) {
390 *w = omap_obj->width;
391 *h = omap_obj->height;
392 return 0;
393 }
394 return -EINVAL;
395}
396
397/* ----------------------------------------------------------------------------- 386/* -----------------------------------------------------------------------------
398 * Fault Handling 387 * Fault Handling
399 */ 388 */
@@ -660,7 +649,8 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
660{ 649{
661 union omap_gem_size gsize; 650 union omap_gem_size gsize;
662 651
663 args->pitch = align_pitch(0, args->width, args->bpp); 652 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
653
664 args->size = PAGE_ALIGN(args->pitch * args->height); 654 args->size = PAGE_ALIGN(args->pitch * args->height);
665 655
666 gsize = (union omap_gem_size){ 656 gsize = (union omap_gem_size){
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 8b5d54385892..ad429683fef7 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
221{ 221{
222 struct drm_device *dev = crtc->dev; 222 struct drm_device *dev = crtc->dev;
223 struct qxl_device *qdev = dev->dev_private; 223 struct qxl_device *qdev = dev->dev_private;
224 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
225 struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb); 224 struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb);
226 struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb); 225 struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb);
227 struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj); 226 struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj);
@@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
252 qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, 251 qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
253 &norect, one_clip_rect, inc); 252 &norect, one_clip_rect, inc);
254 253
255 drm_vblank_get(dev, qcrtc->index); 254 drm_crtc_vblank_get(crtc);
256 255
257 if (event) { 256 if (event) {
258 spin_lock_irqsave(&dev->event_lock, flags); 257 spin_lock_irqsave(&dev->event_lock, flags);
259 drm_send_vblank_event(dev, qcrtc->index, event); 258 drm_crtc_send_vblank_event(crtc, event);
260 spin_unlock_irqrestore(&dev->event_lock, flags); 259 spin_unlock_irqrestore(&dev->event_lock, flags);
261 } 260 }
262 drm_vblank_put(dev, qcrtc->index); 261 drm_crtc_vblank_put(crtc);
263 262
264 ret = qxl_bo_reserve(bo, false); 263 ret = qxl_bo_reserve(bo, false);
265 if (!ret) { 264 if (!ret) {
@@ -730,7 +729,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
730 729
731 drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs); 730 drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
732 qxl_crtc->index = crtc_id; 731 qxl_crtc->index = crtc_id;
733 drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
734 drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); 732 drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
735 return 0; 733 return 0;
736} 734}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index dc9df5fe50ba..460bbceae297 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = {
256 .gem_prime_vmap = qxl_gem_prime_vmap, 256 .gem_prime_vmap = qxl_gem_prime_vmap,
257 .gem_prime_vunmap = qxl_gem_prime_vunmap, 257 .gem_prime_vunmap = qxl_gem_prime_vunmap,
258 .gem_prime_mmap = qxl_gem_prime_mmap, 258 .gem_prime_mmap = qxl_gem_prime_mmap,
259 .gem_free_object = qxl_gem_object_free, 259 .gem_free_object_unlocked = qxl_gem_object_free,
260 .gem_open_object = qxl_gem_object_open, 260 .gem_open_object = qxl_gem_object_open,
261 .gem_close_object = qxl_gem_object_close, 261 .gem_close_object = qxl_gem_object_close,
262 .fops = &qxl_fops, 262 .fops = &qxl_fops,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 5ea57f6320b8..df2657051afd 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -131,10 +131,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
131 int ret; 131 int ret;
132 int aligned_size, size; 132 int aligned_size, size;
133 int height = mode_cmd->height; 133 int height = mode_cmd->height;
134 int bpp;
135 int depth;
136
137 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
138 134
139 size = mode_cmd->pitches[0] * height; 135 size = mode_cmd->pitches[0] * height;
140 aligned_size = ALIGN(size, PAGE_SIZE); 136 aligned_size = ALIGN(size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 4efa8e261baf..f599cd073b72 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -96,7 +96,7 @@ retry:
96 return 0; 96 return 0;
97 97
98 if (have_drawable_releases && sc > 300) { 98 if (have_drawable_releases && sc > 300) {
99 FENCE_WARN(fence, "failed to wait on release %d " 99 FENCE_WARN(fence, "failed to wait on release %llu "
100 "after spincount %d\n", 100 "after spincount %d\n",
101 fence->context & ~0xf0000000, sc); 101 fence->context & ~0xf0000000, sc);
102 goto signaled; 102 goto signaled;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2e216e2ea78c..e91763d5d800 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -276,14 +276,14 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); 276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
277 atombios_blank_crtc(crtc, ATOM_DISABLE); 277 atombios_blank_crtc(crtc, ATOM_DISABLE);
278 if (dev->num_crtcs > radeon_crtc->crtc_id) 278 if (dev->num_crtcs > radeon_crtc->crtc_id)
279 drm_vblank_on(dev, radeon_crtc->crtc_id); 279 drm_crtc_vblank_on(crtc);
280 radeon_crtc_load_lut(crtc); 280 radeon_crtc_load_lut(crtc);
281 break; 281 break;
282 case DRM_MODE_DPMS_STANDBY: 282 case DRM_MODE_DPMS_STANDBY:
283 case DRM_MODE_DPMS_SUSPEND: 283 case DRM_MODE_DPMS_SUSPEND:
284 case DRM_MODE_DPMS_OFF: 284 case DRM_MODE_DPMS_OFF:
285 if (dev->num_crtcs > radeon_crtc->crtc_id) 285 if (dev->num_crtcs > radeon_crtc->crtc_id)
286 drm_vblank_off(dev, radeon_crtc->crtc_id); 286 drm_crtc_vblank_off(crtc);
287 if (radeon_crtc->enabled) 287 if (radeon_crtc->enabled)
288 atombios_blank_crtc(crtc, ATOM_ENABLE); 288 atombios_blank_crtc(crtc, ATOM_ENABLE);
289 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 289 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 80b24a495d6c..5633ee3eb46e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2386,7 +2386,7 @@ struct radeon_device {
2386 struct radeon_mman mman; 2386 struct radeon_mman mman;
2387 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; 2387 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
2388 wait_queue_head_t fence_queue; 2388 wait_queue_head_t fence_queue;
2389 unsigned fence_context; 2389 u64 fence_context;
2390 struct mutex ring_lock; 2390 struct mutex ring_lock;
2391 struct radeon_ring ring[RADEON_NUM_RINGS]; 2391 struct radeon_ring ring[RADEON_NUM_RINGS];
2392 bool ib_pool_ready; 2392 bool ib_pool_ready;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a41b4982647..3965d1916b9c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
231 *blue = radeon_crtc->lut_b[regno] << 6; 231 *blue = radeon_crtc->lut_b[regno] << 6;
232} 232}
233 233
234static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 234static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
235 u16 *blue, uint32_t start, uint32_t size) 235 u16 *blue, uint32_t size)
236{ 236{
237 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 237 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
238 int end = (start + size > 256) ? 256 : start + size, i; 238 int i;
239 239
240 /* userspace palettes are always correct as is */ 240 /* userspace palettes are always correct as is */
241 for (i = start; i < end; i++) { 241 for (i = 0; i < size; i++) {
242 radeon_crtc->lut_r[i] = red[i] >> 6; 242 radeon_crtc->lut_r[i] = red[i] >> 6;
243 radeon_crtc->lut_g[i] = green[i] >> 6; 243 radeon_crtc->lut_g[i] = green[i] >> 6;
244 radeon_crtc->lut_b[i] = blue[i] >> 6; 244 radeon_crtc->lut_b[i] = blue[i] >> 6;
245 } 245 }
246 radeon_crtc_load_lut(crtc); 246 radeon_crtc_load_lut(crtc);
247
248 return 0;
247} 249}
248 250
249static void radeon_crtc_destroy(struct drm_crtc *crtc) 251static void radeon_crtc_destroy(struct drm_crtc *crtc)
@@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
381 383
382 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 384 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
383 385
384 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); 386 drm_crtc_vblank_put(&radeon_crtc->base);
385 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); 387 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
386 queue_work(radeon_crtc->flip_queue, &work->unpin_work); 388 queue_work(radeon_crtc->flip_queue, &work->unpin_work);
387} 389}
@@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
598 } 600 }
599 work->base = base; 601 work->base = base;
600 602
601 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); 603 r = drm_crtc_vblank_get(crtc);
602 if (r) { 604 if (r) {
603 DRM_ERROR("failed to get vblank before flip\n"); 605 DRM_ERROR("failed to get vblank before flip\n");
604 goto pflip_cleanup; 606 goto pflip_cleanup;
@@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
625 return 0; 627 return 0;
626 628
627vblank_cleanup: 629vblank_cleanup:
628 drm_vblank_put(crtc->dev, radeon_crtc->crtc_id); 630 drm_crtc_vblank_put(crtc);
629 631
630pflip_cleanup: 632pflip_cleanup:
631 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { 633 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
@@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
688 pm_runtime_put_autosuspend(dev->dev); 690 pm_runtime_put_autosuspend(dev->dev);
689 return ret; 691 return ret;
690} 692}
693
691static const struct drm_crtc_funcs radeon_crtc_funcs = { 694static const struct drm_crtc_funcs radeon_crtc_funcs = {
692 .cursor_set2 = radeon_crtc_cursor_set2, 695 .cursor_set2 = radeon_crtc_cursor_set2,
693 .cursor_move = radeon_crtc_cursor_move, 696 .cursor_move = radeon_crtc_cursor_move,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b55aa740171f..a455dc7d4aa1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -34,11 +34,9 @@
34#include "radeon_drv.h" 34#include "radeon_drv.h"
35 35
36#include <drm/drm_pciids.h> 36#include <drm/drm_pciids.h>
37#include <linux/apple-gmux.h>
38#include <linux/console.h> 37#include <linux/console.h>
39#include <linux/module.h> 38#include <linux/module.h>
40#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
41#include <linux/vgaarb.h>
42#include <linux/vga_switcheroo.h> 40#include <linux/vga_switcheroo.h>
43#include <drm/drm_gem.h> 41#include <drm/drm_gem.h>
44 42
@@ -340,13 +338,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
340 if (ret == -EPROBE_DEFER) 338 if (ret == -EPROBE_DEFER)
341 return ret; 339 return ret;
342 340
343 /* 341 if (vga_switcheroo_client_probe_defer(pdev))
344 * apple-gmux is needed on dual GPU MacBook Pro
345 * to probe the panel if we're the inactive GPU.
346 */
347 if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
348 apple_gmux_present() && pdev != vga_default_device() &&
349 !vga_switcheroo_handler_flags())
350 return -EPROBE_DEFER; 342 return -EPROBE_DEFER;
351 343
352 /* Get rid of things like offb */ 344 /* Get rid of things like offb */
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 478d4099b0d0..d0de4022fff9 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -332,14 +332,14 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); 332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
333 } 333 }
334 if (dev->num_crtcs > radeon_crtc->crtc_id) 334 if (dev->num_crtcs > radeon_crtc->crtc_id)
335 drm_vblank_on(dev, radeon_crtc->crtc_id); 335 drm_crtc_vblank_on(crtc);
336 radeon_crtc_load_lut(crtc); 336 radeon_crtc_load_lut(crtc);
337 break; 337 break;
338 case DRM_MODE_DPMS_STANDBY: 338 case DRM_MODE_DPMS_STANDBY:
339 case DRM_MODE_DPMS_SUSPEND: 339 case DRM_MODE_DPMS_SUSPEND:
340 case DRM_MODE_DPMS_OFF: 340 case DRM_MODE_DPMS_OFF:
341 if (dev->num_crtcs > radeon_crtc->crtc_id) 341 if (dev->num_crtcs > radeon_crtc->crtc_id)
342 drm_vblank_off(dev, radeon_crtc->crtc_id); 342 drm_crtc_vblank_off(crtc);
343 if (radeon_crtc->crtc_id) 343 if (radeon_crtc->crtc_id)
344 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); 344 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
345 else { 345 else {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 38226d925a5b..4b6542538ff9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
246 246
247static void radeon_pm_set_clocks(struct radeon_device *rdev) 247static void radeon_pm_set_clocks(struct radeon_device *rdev)
248{ 248{
249 struct drm_crtc *crtc;
249 int i, r; 250 int i, r;
250 251
251 /* no need to take locks, etc. if nothing's going to change */ 252 /* no need to take locks, etc. if nothing's going to change */
@@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
274 radeon_unmap_vram_bos(rdev); 275 radeon_unmap_vram_bos(rdev);
275 276
276 if (rdev->irq.installed) { 277 if (rdev->irq.installed) {
277 for (i = 0; i < rdev->num_crtc; i++) { 278 i = 0;
279 drm_for_each_crtc(crtc, rdev->ddev) {
278 if (rdev->pm.active_crtcs & (1 << i)) { 280 if (rdev->pm.active_crtcs & (1 << i)) {
279 /* This can fail if a modeset is in progress */ 281 /* This can fail if a modeset is in progress */
280 if (drm_vblank_get(rdev->ddev, i) == 0) 282 if (drm_crtc_vblank_get(crtc) == 0)
281 rdev->pm.req_vblank |= (1 << i); 283 rdev->pm.req_vblank |= (1 << i);
282 else 284 else
283 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n", 285 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
284 i); 286 i);
285 } 287 }
288 i++;
286 } 289 }
287 } 290 }
288 291
289 radeon_set_power_state(rdev); 292 radeon_set_power_state(rdev);
290 293
291 if (rdev->irq.installed) { 294 if (rdev->irq.installed) {
292 for (i = 0; i < rdev->num_crtc; i++) { 295 i = 0;
296 drm_for_each_crtc(crtc, rdev->ddev) {
293 if (rdev->pm.req_vblank & (1 << i)) { 297 if (rdev->pm.req_vblank & (1 << i)) {
294 rdev->pm.req_vblank &= ~(1 << i); 298 rdev->pm.req_vblank &= ~(1 << i);
295 drm_vblank_put(rdev->ddev, i); 299 drm_crtc_vblank_put(crtc);
296 } 300 }
301 i++;
297 } 302 }
298 } 303 }
299 304
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index fb9242d27883..48ec4b6e8b26 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = {
217 .get_vblank_counter = drm_vblank_no_hw_counter, 217 .get_vblank_counter = drm_vblank_no_hw_counter,
218 .enable_vblank = rcar_du_enable_vblank, 218 .enable_vblank = rcar_du_enable_vblank,
219 .disable_vblank = rcar_du_disable_vblank, 219 .disable_vblank = rcar_du_disable_vblank,
220 .gem_free_object = drm_gem_cma_free_object, 220 .gem_free_object_unlocked = drm_gem_cma_free_object,
221 .gem_vm_ops = &drm_gem_cma_vm_ops, 221 .gem_vm_ops = &drm_gem_cma_vm_ops,
222 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 222 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
223 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 223 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 4e939e41f030..55149e9ce28e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -27,18 +27,6 @@
27#include "rcar_du_vgacon.h" 27#include "rcar_du_vgacon.h"
28 28
29/* ----------------------------------------------------------------------------- 29/* -----------------------------------------------------------------------------
30 * Common connector functions
31 */
32
33struct drm_encoder *
34rcar_du_connector_best_encoder(struct drm_connector *connector)
35{
36 struct rcar_du_connector *rcon = to_rcar_connector(connector);
37
38 return rcar_encoder_to_drm_encoder(rcon->encoder);
39}
40
41/* -----------------------------------------------------------------------------
42 * Encoder 30 * Encoder
43 */ 31 */
44 32
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index 719b6f2a031c..a8669c3e0dd5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -49,9 +49,6 @@ struct rcar_du_connector {
49#define to_rcar_connector(c) \ 49#define to_rcar_connector(c) \
50 container_of(c, struct rcar_du_connector, connector) 50 container_of(c, struct rcar_du_connector, connector)
51 51
52struct drm_encoder *
53rcar_du_connector_best_encoder(struct drm_connector *connector);
54
55int rcar_du_encoder_init(struct rcar_du_device *rcdu, 52int rcar_du_encoder_init(struct rcar_du_device *rcdu,
56 enum rcar_du_encoder_type type, 53 enum rcar_du_encoder_type type,
57 enum rcar_du_output output, 54 enum rcar_du_output output,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index 6c927144b5c9..612b4d5ae098 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -52,7 +52,6 @@ static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
52static const struct drm_connector_helper_funcs connector_helper_funcs = { 52static const struct drm_connector_helper_funcs connector_helper_funcs = {
53 .get_modes = rcar_du_hdmi_connector_get_modes, 53 .get_modes = rcar_du_hdmi_connector_get_modes,
54 .mode_valid = rcar_du_hdmi_connector_mode_valid, 54 .mode_valid = rcar_du_hdmi_connector_mode_valid,
55 .best_encoder = rcar_du_connector_best_encoder,
56}; 55};
57 56
58static enum drm_connector_status 57static enum drm_connector_status
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index e70a4f33d970..6bb032d8ac6b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
288{ 288{
289 struct rcar_du_device *rcdu = dev->dev_private; 289 struct rcar_du_device *rcdu = dev->dev_private;
290 struct rcar_du_commit *commit; 290 struct rcar_du_commit *commit;
291 struct drm_crtc *crtc;
292 struct drm_crtc_state *crtc_state;
291 unsigned int i; 293 unsigned int i;
292 int ret; 294 int ret;
293 295
@@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
309 /* Wait until all affected CRTCs have completed previous commits and 311 /* Wait until all affected CRTCs have completed previous commits and
310 * mark them as pending. 312 * mark them as pending.
311 */ 313 */
312 for (i = 0; i < dev->mode_config.num_crtc; ++i) { 314 for_each_crtc_in_state(state, crtc, crtc_state, i)
313 if (state->crtcs[i]) 315 commit->crtcs |= drm_crtc_mask(crtc);
314 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
315 }
316 316
317 spin_lock(&rcdu->commit.wait.lock); 317 spin_lock(&rcdu->commit.wait.lock);
318 ret = wait_event_interruptible_locked(rcdu->commit.wait, 318 ret = wait_event_interruptible_locked(rcdu->commit.wait,
@@ -327,7 +327,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
327 } 327 }
328 328
329 /* Swap the state, this is the point of no return. */ 329 /* Swap the state, this is the point of no return. */
330 drm_atomic_helper_swap_state(dev, state); 330 drm_atomic_helper_swap_state(state, true);
331 331
332 if (nonblock) 332 if (nonblock)
333 schedule_work(&commit->work); 333 schedule_work(&commit->work);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index e905f5da7aaa..6afd0af312ba 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -59,7 +59,6 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
59 59
60static const struct drm_connector_helper_funcs connector_helper_funcs = { 60static const struct drm_connector_helper_funcs connector_helper_funcs = {
61 .get_modes = rcar_du_lvds_connector_get_modes, 61 .get_modes = rcar_du_lvds_connector_get_modes,
62 .best_encoder = rcar_du_connector_best_encoder,
63}; 62};
64 63
65static enum drm_connector_status 64static enum drm_connector_status
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index d445e67f78e1..bfe31ca870cc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
140 bool needs_realloc = false; 140 bool needs_realloc = false;
141 unsigned int groups = 0; 141 unsigned int groups = 0;
142 unsigned int i; 142 unsigned int i;
143 struct drm_plane *drm_plane;
144 struct drm_plane_state *drm_plane_state;
143 145
144 /* Check if hardware planes need to be reallocated. */ 146 /* Check if hardware planes need to be reallocated. */
145 for (i = 0; i < dev->mode_config.num_total_plane; ++i) { 147 for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
146 struct rcar_du_plane_state *plane_state; 148 struct rcar_du_plane_state *plane_state;
147 struct rcar_du_plane *plane; 149 struct rcar_du_plane *plane;
148 unsigned int index; 150 unsigned int index;
149 151
150 if (!state->planes[i]) 152 plane = to_rcar_plane(drm_plane);
151 continue; 153 plane_state = to_rcar_plane_state(drm_plane_state);
152
153 plane = to_rcar_plane(state->planes[i]);
154 plane_state = to_rcar_plane_state(state->plane_states[i]);
155 154
156 dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__, 155 dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
157 plane->group->index, plane - plane->group->planes); 156 plane->group->index, plane - plane->group->planes);
@@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
247 } 246 }
248 247
249 /* Reallocate hardware planes for each plane that needs it. */ 248 /* Reallocate hardware planes for each plane that needs it. */
250 for (i = 0; i < dev->mode_config.num_total_plane; ++i) { 249 for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
251 struct rcar_du_plane_state *plane_state; 250 struct rcar_du_plane_state *plane_state;
252 struct rcar_du_plane *plane; 251 struct rcar_du_plane *plane;
253 unsigned int crtc_planes; 252 unsigned int crtc_planes;
254 unsigned int free; 253 unsigned int free;
255 int idx; 254 int idx;
256 255
257 if (!state->planes[i]) 256 plane = to_rcar_plane(drm_plane);
258 continue; 257 plane_state = to_rcar_plane_state(drm_plane_state);
259
260 plane = to_rcar_plane(state->planes[i]);
261 plane_state = to_rcar_plane_state(state->plane_states[i]);
262 258
263 dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__, 259 dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
264 plane->group->index, plane - plane->group->planes); 260 plane->group->index, plane - plane->group->planes);
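
The plane loops get the same treatment: for_each_plane_in_state() iterates only the planes with new state in the commit, which is precisely what the dropped !state->planes[i] tests filtered out, and it hands back the state pointer directly instead of indexing state->plane_states[i]. Pattern sketch:

    /* Walk only the planes that are part of this commit. */
    static void log_commit_planes(struct device *dev,
                                  struct drm_atomic_state *state)
    {
            struct drm_plane *plane;
            struct drm_plane_state *plane_state;
            unsigned int i;

            for_each_plane_in_state(state, plane, plane_state, i)
                    dev_dbg(dev, "plane %u in commit\n", drm_plane_index(plane));
    }
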
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 9d7e5c99caf6..8d6125c1c0f9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -28,7 +28,6 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
28 28
29static const struct drm_connector_helper_funcs connector_helper_funcs = { 29static const struct drm_connector_helper_funcs connector_helper_funcs = {
30 .get_modes = rcar_du_vga_connector_get_modes, 30 .get_modes = rcar_du_vga_connector_get_modes,
31 .best_encoder = rcar_du_connector_best_encoder,
32}; 31};
33 32
34static enum drm_connector_status 33static enum drm_connector_status
@@ -79,7 +78,5 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
79 if (ret < 0) 78 if (ret < 0)
80 return ret; 79 return ret;
81 80
82 rcon->encoder = renc;
83
84 return 0; 81 return 0;
85} 82}
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 7f6a55cae27a..c120172add5c 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -349,20 +349,11 @@ static int rockchip_dp_remove(struct platform_device *pdev)
349 return 0; 349 return 0;
350} 350}
351 351
352static const struct dev_pm_ops rockchip_dp_pm_ops = {
352#ifdef CONFIG_PM_SLEEP 353#ifdef CONFIG_PM_SLEEP
353static int rockchip_dp_suspend(struct device *dev) 354 .suspend = analogix_dp_suspend,
354{ 355 .resume_early = analogix_dp_resume,
355 return analogix_dp_suspend(dev);
356}
357
358static int rockchip_dp_resume(struct device *dev)
359{
360 return analogix_dp_resume(dev);
361}
362#endif 356#endif
363
364static const struct dev_pm_ops rockchip_dp_pm_ops = {
365 SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume)
366}; 357};
367 358
368static const struct of_device_id rockchip_dp_dt_ids[] = { 359static const struct of_device_id rockchip_dp_dt_ids[] = {
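
One nuance in the rockchip-dp pm_ops rewrite: SET_SYSTEM_SLEEP_PM_OPS(s, r) expands to .suspend/.resume plus .freeze/.thaw/.poweroff/.restore, while the direct initializers populate only .suspend and .resume_early, so the hibernation entry points are no longer wired up and resume now runs in the early phase. The resulting table has this shape (sketch, using the analogix helpers referenced above):

    static const struct dev_pm_ops example_pm_ops = {
    #ifdef CONFIG_PM_SLEEP
            .suspend      = analogix_dp_suspend,
            .resume_early = analogix_dp_resume,
    #endif
    };
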
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index dedc65b40f36..ca22e5ee89ca 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -964,18 +964,9 @@ static enum drm_mode_status dw_mipi_dsi_mode_valid(
964 return mode_status; 964 return mode_status;
965} 965}
966 966
967static struct drm_encoder *dw_mipi_dsi_connector_best_encoder(
968 struct drm_connector *connector)
969{
970 struct dw_mipi_dsi *dsi = con_to_dsi(connector);
971
972 return &dsi->encoder;
973}
974
975static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = { 967static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
976 .get_modes = dw_mipi_dsi_connector_get_modes, 968 .get_modes = dw_mipi_dsi_connector_get_modes,
977 .mode_valid = dw_mipi_dsi_mode_valid, 969 .mode_valid = dw_mipi_dsi_mode_valid,
978 .best_encoder = dw_mipi_dsi_connector_best_encoder,
979}; 970};
980 971
981static enum drm_connector_status 972static enum drm_connector_status
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index f8b4feb60b25..006260de9dbd 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -579,14 +579,6 @@ inno_hdmi_connector_mode_valid(struct drm_connector *connector,
579 return MODE_OK; 579 return MODE_OK;
580} 580}
581 581
582static struct drm_encoder *
583inno_hdmi_connector_best_encoder(struct drm_connector *connector)
584{
585 struct inno_hdmi *hdmi = to_inno_hdmi(connector);
586
587 return &hdmi->encoder;
588}
589
590static int 582static int
591inno_hdmi_probe_single_connector_modes(struct drm_connector *connector, 583inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
592 uint32_t maxX, uint32_t maxY) 584 uint32_t maxX, uint32_t maxY)
@@ -613,7 +605,6 @@ static struct drm_connector_funcs inno_hdmi_connector_funcs = {
613static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = { 605static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
614 .get_modes = inno_hdmi_connector_get_modes, 606 .get_modes = inno_hdmi_connector_get_modes,
615 .mode_valid = inno_hdmi_connector_mode_valid, 607 .mode_valid = inno_hdmi_connector_mode_valid,
616 .best_encoder = inno_hdmi_connector_best_encoder,
617}; 608};
618 609
619static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi) 610static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a409d1f703cb..c2bcc5ea1abe 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -19,11 +19,13 @@
19#include <drm/drmP.h> 19#include <drm/drmP.h>
20#include <drm/drm_crtc_helper.h> 20#include <drm/drm_crtc_helper.h>
21#include <drm/drm_fb_helper.h> 21#include <drm/drm_fb_helper.h>
22#include <drm/drm_gem_cma_helper.h>
22#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
23#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/of_graph.h> 26#include <linux/of_graph.h>
26#include <linux/component.h> 27#include <linux/component.h>
28#include <linux/console.h>
27 29
28#include "rockchip_drm_drv.h" 30#include "rockchip_drm_drv.h"
29#include "rockchip_drm_fb.h" 31#include "rockchip_drm_fb.h"
@@ -37,6 +39,7 @@
37#define DRIVER_MINOR 0 39#define DRIVER_MINOR 0
38 40
39static bool is_support_iommu = true; 41static bool is_support_iommu = true;
42static struct drm_driver rockchip_drm_driver;
40 43
41/* 44/*
42 * Attach a (component) device to the shared drm dma mapping from master drm 45 * Attach a (component) device to the shared drm dma mapping from master drm
@@ -132,20 +135,28 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
132 priv->crtc_funcs[pipe]->disable_vblank(crtc); 135 priv->crtc_funcs[pipe]->disable_vblank(crtc);
133} 136}
134 137
135static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) 138static int rockchip_drm_bind(struct device *dev)
136{ 139{
140 struct drm_device *drm_dev;
137 struct rockchip_drm_private *private; 141 struct rockchip_drm_private *private;
138 struct dma_iommu_mapping *mapping = NULL; 142 struct dma_iommu_mapping *mapping = NULL;
139 struct device *dev = drm_dev->dev;
140 struct drm_connector *connector;
141 int ret; 143 int ret;
142 144
143 private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); 145 drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
144 if (!private) 146 if (!drm_dev)
145 return -ENOMEM; 147 return -ENOMEM;
146 148
147 mutex_init(&private->commit.lock); 149 ret = drm_dev_register(drm_dev, 0);
148 INIT_WORK(&private->commit.work, rockchip_drm_atomic_work); 150 if (ret)
151 goto err_free;
152
153 dev_set_drvdata(dev, drm_dev);
154
155 private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
156 if (!private) {
157 ret = -ENOMEM;
158 goto err_unregister;
159 }
149 160
150 drm_dev->dev_private = private; 161 drm_dev->dev_private = private;
151 162
@@ -186,21 +197,10 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
186 if (ret) 197 if (ret)
187 goto err_detach_device; 198 goto err_detach_device;
188 199
189 /* 200 ret = drm_connector_register_all(drm_dev);
190 * All components are now added, we can publish the connector sysfs 201 if (ret) {
191 * entries to userspace. This will generate hotplug events and so 202 dev_err(dev, "failed to register connectors\n");
192 * userspace will expect to be able to access DRM at this point. 203 goto err_unbind;
193 */
194 list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
195 head) {
196 ret = drm_connector_register(connector);
197 if (ret) {
198 dev_err(drm_dev->dev,
199 "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
200 connector->base.id,
201 connector->name, ret);
202 goto err_unbind;
203 }
204 } 204 }
205 205
206 /* init kms poll for handling hpd */ 206 /* init kms poll for handling hpd */
@@ -240,12 +240,16 @@ err_release_mapping:
240err_config_cleanup: 240err_config_cleanup:
241 drm_mode_config_cleanup(drm_dev); 241 drm_mode_config_cleanup(drm_dev);
242 drm_dev->dev_private = NULL; 242 drm_dev->dev_private = NULL;
243err_unregister:
244 drm_dev_unregister(drm_dev);
245err_free:
246 drm_dev_unref(drm_dev);
243 return ret; 247 return ret;
244} 248}
245 249
246static int rockchip_drm_unload(struct drm_device *drm_dev) 250static void rockchip_drm_unbind(struct device *dev)
247{ 251{
248 struct device *dev = drm_dev->dev; 252 struct drm_device *drm_dev = dev_get_drvdata(dev);
249 253
250 rockchip_drm_fbdev_fini(drm_dev); 254 rockchip_drm_fbdev_fini(drm_dev);
251 drm_vblank_cleanup(drm_dev); 255 drm_vblank_cleanup(drm_dev);
@@ -255,29 +259,9 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
255 arm_iommu_detach_device(dev); 259 arm_iommu_detach_device(dev);
256 drm_mode_config_cleanup(drm_dev); 260 drm_mode_config_cleanup(drm_dev);
257 drm_dev->dev_private = NULL; 261 drm_dev->dev_private = NULL;
258 262 drm_dev_unregister(drm_dev);
259 return 0; 263 drm_dev_unref(drm_dev);
260} 264 dev_set_drvdata(dev, NULL);
261
262static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
263 struct drm_file *file_priv)
264{
265 struct rockchip_drm_private *priv = crtc->dev->dev_private;
266 int pipe = drm_crtc_index(crtc);
267
268 if (pipe < ROCKCHIP_MAX_CRTC &&
269 priv->crtc_funcs[pipe] &&
270 priv->crtc_funcs[pipe]->cancel_pending_vblank)
271 priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
272}
273
274static void rockchip_drm_preclose(struct drm_device *dev,
275 struct drm_file *file_priv)
276{
277 struct drm_crtc *crtc;
278
279 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
280 rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
281} 265}
282 266
283void rockchip_drm_lastclose(struct drm_device *dev) 267void rockchip_drm_lastclose(struct drm_device *dev)
@@ -300,23 +284,15 @@ static const struct file_operations rockchip_drm_driver_fops = {
300 .release = drm_release, 284 .release = drm_release,
301}; 285};
302 286
303const struct vm_operations_struct rockchip_drm_vm_ops = {
304 .open = drm_gem_vm_open,
305 .close = drm_gem_vm_close,
306};
307
308static struct drm_driver rockchip_drm_driver = { 287static struct drm_driver rockchip_drm_driver = {
309 .driver_features = DRIVER_MODESET | DRIVER_GEM | 288 .driver_features = DRIVER_MODESET | DRIVER_GEM |
310 DRIVER_PRIME | DRIVER_ATOMIC, 289 DRIVER_PRIME | DRIVER_ATOMIC,
311 .load = rockchip_drm_load,
312 .unload = rockchip_drm_unload,
313 .preclose = rockchip_drm_preclose,
314 .lastclose = rockchip_drm_lastclose, 290 .lastclose = rockchip_drm_lastclose,
315 .get_vblank_counter = drm_vblank_no_hw_counter, 291 .get_vblank_counter = drm_vblank_no_hw_counter,
316 .enable_vblank = rockchip_drm_crtc_enable_vblank, 292 .enable_vblank = rockchip_drm_crtc_enable_vblank,
317 .disable_vblank = rockchip_drm_crtc_disable_vblank, 293 .disable_vblank = rockchip_drm_crtc_disable_vblank,
318 .gem_vm_ops = &rockchip_drm_vm_ops, 294 .gem_vm_ops = &drm_gem_cma_vm_ops,
319 .gem_free_object = rockchip_gem_free_object, 295 .gem_free_object_unlocked = rockchip_gem_free_object,
320 .dumb_create = rockchip_gem_dumb_create, 296 .dumb_create = rockchip_gem_dumb_create,
321 .dumb_map_offset = rockchip_gem_dumb_map_offset, 297 .dumb_map_offset = rockchip_gem_dumb_map_offset,
322 .dumb_destroy = drm_gem_dumb_destroy, 298 .dumb_destroy = drm_gem_dumb_destroy,
@@ -337,25 +313,38 @@ static struct drm_driver rockchip_drm_driver = {
337}; 313};
338 314
339#ifdef CONFIG_PM_SLEEP 315#ifdef CONFIG_PM_SLEEP
340static int rockchip_drm_sys_suspend(struct device *dev) 316void rockchip_drm_fb_suspend(struct drm_device *drm)
341{ 317{
342 struct drm_device *drm = dev_get_drvdata(dev); 318 struct rockchip_drm_private *priv = drm->dev_private;
343 struct drm_connector *connector;
344 319
345 if (!drm) 320 console_lock();
346 return 0; 321 drm_fb_helper_set_suspend(&priv->fbdev_helper, 1);
322 console_unlock();
323}
324
325void rockchip_drm_fb_resume(struct drm_device *drm)
326{
327 struct rockchip_drm_private *priv = drm->dev_private;
347 328
348 drm_modeset_lock_all(drm); 329 console_lock();
349 list_for_each_entry(connector, &drm->mode_config.connector_list, head) { 330 drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
350 int old_dpms = connector->dpms; 331 console_unlock();
332}
351 333
352 if (connector->funcs->dpms) 334static int rockchip_drm_sys_suspend(struct device *dev)
353 connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); 335{
336 struct drm_device *drm = dev_get_drvdata(dev);
337 struct rockchip_drm_private *priv = drm->dev_private;
354 338
355 /* Set the old mode back to the connector for resume */ 339 drm_kms_helper_poll_disable(drm);
356 connector->dpms = old_dpms; 340 rockchip_drm_fb_suspend(drm);
341
342 priv->state = drm_atomic_helper_suspend(drm);
343 if (IS_ERR(priv->state)) {
344 rockchip_drm_fb_resume(drm);
345 drm_kms_helper_poll_enable(drm);
346 return PTR_ERR(priv->state);
357 } 347 }
358 drm_modeset_unlock_all(drm);
359 348
360 return 0; 349 return 0;
361} 350}
@@ -363,47 +352,11 @@ static int rockchip_drm_sys_suspend(struct device *dev)
363static int rockchip_drm_sys_resume(struct device *dev) 352static int rockchip_drm_sys_resume(struct device *dev)
364{ 353{
365 struct drm_device *drm = dev_get_drvdata(dev); 354 struct drm_device *drm = dev_get_drvdata(dev);
366 struct drm_connector *connector; 355 struct rockchip_drm_private *priv = drm->dev_private;
367 enum drm_connector_status status;
368 bool changed = false;
369
370 if (!drm)
371 return 0;
372
373 drm_modeset_lock_all(drm);
374 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
375 int desired_mode = connector->dpms;
376
377 /*
378 * at suspend time, we save dpms to connector->dpms,
379 * restore the old_dpms, and at current time, the connector
380 * dpms status must be DRM_MODE_DPMS_OFF.
381 */
382 connector->dpms = DRM_MODE_DPMS_OFF;
383
384 /*
385 * If the connector has been disconnected during suspend,
386 * disconnect it from the encoder and leave it off. We'll notify
387 * userspace at the end.
388 */
389 if (desired_mode == DRM_MODE_DPMS_ON) {
390 status = connector->funcs->detect(connector, true);
391 if (status == connector_status_disconnected) {
392 connector->encoder = NULL;
393 connector->status = status;
394 changed = true;
395 continue;
396 }
397 }
398 if (connector->funcs->dpms)
399 connector->funcs->dpms(connector, desired_mode);
400 }
401 drm_modeset_unlock_all(drm);
402
403 drm_helper_resume_force_mode(drm);
404 356
405 if (changed) 357 drm_atomic_helper_resume(drm, priv->state);
406 drm_kms_helper_hotplug_event(drm); 358 rockchip_drm_fb_resume(drm);
359 drm_kms_helper_poll_enable(drm);
407 360
408 return 0; 361 return 0;
409} 362}
@@ -444,37 +397,6 @@ static void rockchip_add_endpoints(struct device *dev,
444 } 397 }
445} 398}
446 399
447static int rockchip_drm_bind(struct device *dev)
448{
449 struct drm_device *drm;
450 int ret;
451
452 drm = drm_dev_alloc(&rockchip_drm_driver, dev);
453 if (!drm)
454 return -ENOMEM;
455
456 ret = drm_dev_register(drm, 0);
457 if (ret)
458 goto err_free;
459
460 dev_set_drvdata(dev, drm);
461
462 return 0;
463
464err_free:
465 drm_dev_unref(drm);
466 return ret;
467}
468
469static void rockchip_drm_unbind(struct device *dev)
470{
471 struct drm_device *drm = dev_get_drvdata(dev);
472
473 drm_dev_unregister(drm);
474 drm_dev_unref(drm);
475 dev_set_drvdata(dev, NULL);
476}
477
478static const struct component_master_ops rockchip_drm_ops = { 400static const struct component_master_ops rockchip_drm_ops = {
479 .bind = rockchip_drm_bind, 401 .bind = rockchip_drm_bind,
480 .unbind = rockchip_drm_unbind, 402 .unbind = rockchip_drm_unbind,
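
With .load/.unload gone, all rockchip setup now lives in the component-master bind path above: allocate and register the drm_device first, then bring up private state, the IOMMU mapping and the components, unwinding through drm_dev_unregister()/drm_dev_unref() on error. System sleep moves to the atomic helpers; the core of that pattern, assuming the driver parks the saved state in its private struct as done here:

    static int example_suspend(struct device *dev)
    {
            struct drm_device *drm = dev_get_drvdata(dev);
            struct example_priv *priv = drm->dev_private; /* hypothetical */

            drm_kms_helper_poll_disable(drm);
            priv->state = drm_atomic_helper_suspend(drm);
            if (IS_ERR(priv->state)) {
                    drm_kms_helper_poll_enable(drm);
                    return PTR_ERR(priv->state);
            }
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            struct drm_device *drm = dev_get_drvdata(dev);
            struct example_priv *priv = drm->dev_private;

            drm_atomic_helper_resume(drm, priv->state);
            drm_kms_helper_poll_enable(drm);
            return 0;
    }

The fbdev console is additionally frozen around this with drm_fb_helper_set_suspend() under console_lock(), as the hunk shows.
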
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 56f43a364c7f..ea3932940061 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,14 +40,6 @@ struct rockchip_crtc_funcs {
40 int (*enable_vblank)(struct drm_crtc *crtc); 40 int (*enable_vblank)(struct drm_crtc *crtc);
41 void (*disable_vblank)(struct drm_crtc *crtc); 41 void (*disable_vblank)(struct drm_crtc *crtc);
42 void (*wait_for_update)(struct drm_crtc *crtc); 42 void (*wait_for_update)(struct drm_crtc *crtc);
43 void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
44};
45
46struct rockchip_atomic_commit {
47 struct work_struct work;
48 struct drm_atomic_state *state;
49 struct drm_device *dev;
50 struct mutex lock;
51}; 43};
52 44
53struct rockchip_crtc_state { 45struct rockchip_crtc_state {
@@ -68,11 +60,9 @@ struct rockchip_drm_private {
68 struct drm_fb_helper fbdev_helper; 60 struct drm_fb_helper fbdev_helper;
69 struct drm_gem_object *fbdev_bo; 61 struct drm_gem_object *fbdev_bo;
70 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; 62 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
71 63 struct drm_atomic_state *state;
72 struct rockchip_atomic_commit commit;
73}; 64};
74 65
75void rockchip_drm_atomic_work(struct work_struct *work);
76int rockchip_register_crtc_funcs(struct drm_crtc *crtc, 66int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
77 const struct rockchip_crtc_funcs *crtc_funcs); 67 const struct rockchip_crtc_funcs *crtc_funcs);
78void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc); 68void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 755cfdba61cd..20f12bc5a386 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -228,87 +228,32 @@ rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_stat
228} 228}
229 229
230static void 230static void
231rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit) 231rockchip_atomic_commit_tail(struct drm_atomic_state *state)
232{ 232{
233 struct drm_atomic_state *state = commit->state; 233 struct drm_device *dev = state->dev;
234 struct drm_device *dev = commit->dev;
235 234
236 /*
237 * TODO: do fence wait here.
238 */
239
240 /*
241 * Rockchip crtc support runtime PM, can't update display planes
242 * when crtc is disabled.
243 *
244 * drm_atomic_helper_commit comments detail that:
245 * For drivers supporting runtime PM the recommended sequence is
246 *
247 * drm_atomic_helper_commit_modeset_disables(dev, state);
248 *
249 * drm_atomic_helper_commit_modeset_enables(dev, state);
250 *
251 * drm_atomic_helper_commit_planes(dev, state, true);
252 *
253 * See the kerneldoc entries for these three functions for more details.
254 */
255 drm_atomic_helper_commit_modeset_disables(dev, state); 235 drm_atomic_helper_commit_modeset_disables(dev, state);
256 236
257 drm_atomic_helper_commit_modeset_enables(dev, state); 237 drm_atomic_helper_commit_modeset_enables(dev, state);
258 238
259 drm_atomic_helper_commit_planes(dev, state, true); 239 drm_atomic_helper_commit_planes(dev, state, true);
260 240
241 drm_atomic_helper_commit_hw_done(state);
242
261 rockchip_atomic_wait_for_complete(dev, state); 243 rockchip_atomic_wait_for_complete(dev, state);
262 244
263 drm_atomic_helper_cleanup_planes(dev, state); 245 drm_atomic_helper_cleanup_planes(dev, state);
264
265 drm_atomic_state_free(state);
266}
267
268void rockchip_drm_atomic_work(struct work_struct *work)
269{
270 struct rockchip_atomic_commit *commit = container_of(work,
271 struct rockchip_atomic_commit, work);
272
273 rockchip_atomic_commit_complete(commit);
274} 246}
275 247
276int rockchip_drm_atomic_commit(struct drm_device *dev, 248struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
277 struct drm_atomic_state *state, 249 .atomic_commit_tail = rockchip_atomic_commit_tail,
278 bool nonblock) 250};
279{
280 struct rockchip_drm_private *private = dev->dev_private;
281 struct rockchip_atomic_commit *commit = &private->commit;
282 int ret;
283
284 ret = drm_atomic_helper_prepare_planes(dev, state);
285 if (ret)
286 return ret;
287
288 /* serialize outstanding nonblocking commits */
289 mutex_lock(&commit->lock);
290 flush_work(&commit->work);
291
292 drm_atomic_helper_swap_state(dev, state);
293
294 commit->dev = dev;
295 commit->state = state;
296
297 if (nonblock)
298 schedule_work(&commit->work);
299 else
300 rockchip_atomic_commit_complete(commit);
301
302 mutex_unlock(&commit->lock);
303
304 return 0;
305}
306 251
307static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { 252static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
308 .fb_create = rockchip_user_fb_create, 253 .fb_create = rockchip_user_fb_create,
309 .output_poll_changed = rockchip_drm_output_poll_changed, 254 .output_poll_changed = rockchip_drm_output_poll_changed,
310 .atomic_check = drm_atomic_helper_check, 255 .atomic_check = drm_atomic_helper_check,
311 .atomic_commit = rockchip_drm_atomic_commit, 256 .atomic_commit = drm_atomic_helper_commit,
312}; 257};
313 258
314struct drm_framebuffer * 259struct drm_framebuffer *
@@ -339,4 +284,5 @@ void rockchip_drm_mode_config_init(struct drm_device *dev)
339 dev->mode_config.max_height = 4096; 284 dev->mode_config.max_height = 4096;
340 285
341 dev->mode_config.funcs = &rockchip_drm_mode_config_funcs; 286 dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
287 dev->mode_config.helper_private = &rockchip_mode_config_helpers;
342} 288}
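
The hand-rolled commit machinery above collapses into drm_atomic_helper_commit() plus an atomic_commit_tail override; the only driver-specific step left is waiting for the VOP to latch the new configuration before plane cleanup. For reference, the stock drm_atomic_helper_commit_tail() that this overrides runs roughly:

    static void stock_commit_tail(struct drm_atomic_state *state)
    {
            struct drm_device *dev = state->dev;

            drm_atomic_helper_commit_modeset_disables(dev, state);
            drm_atomic_helper_commit_planes(dev, state, false);
            drm_atomic_helper_commit_modeset_enables(dev, state);
            drm_atomic_helper_commit_hw_done(state);
            drm_atomic_helper_wait_for_vblanks(dev, state);
            drm_atomic_helper_cleanup_planes(dev, state);
    }

Rockchip keeps its enables-before-planes ordering (the runtime-PM constraint the deleted comment described) and substitutes its own latch wait for the generic vblank wait.
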
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index f261512bb4a0..207e01de6e32 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -108,7 +108,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
108 fbi->screen_size = rk_obj->base.size; 108 fbi->screen_size = rk_obj->base.size;
109 fbi->fix.smem_len = rk_obj->base.size; 109 fbi->fix.smem_len = rk_obj->base.size;
110 110
111 DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n", 111 DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
112 fb->width, fb->height, fb->depth, rk_obj->kvaddr, 112 fb->width, fb->height, fb->depth, rk_obj->kvaddr,
113 offset, size); 113 offset, size);
114 114
@@ -156,9 +156,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
156 goto err_drm_fb_helper_fini; 156 goto err_drm_fb_helper_fini;
157 } 157 }
158 158
159 /* disable all the possible outputs/crtcs before entering KMS mode */
160 drm_helper_disable_unused_functions(dev);
161
162 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); 159 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
163 if (ret < 0) { 160 if (ret < 0) {
164 dev_err(dev->dev, "Failed to set initial hw config - %d.\n", 161 dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
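
Dropping drm_helper_disable_unused_functions() here is more than tidying: it is a legacy-CRTC-helper call that changes encoder/CRTC routing behind the atomic state's back, and drm_fb_helper_initial_config() already computes and commits a consistent configuration on its own. What remains (sketch):

    static int example_fbdev_init(struct drm_fb_helper *helper)
    {
            /* no drm_helper_disable_unused_functions() for atomic drivers */
            return drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);
    }
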
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 9c2d8a894093..059e902f872d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -38,7 +38,7 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
38 &rk_obj->dma_addr, GFP_KERNEL, 38 &rk_obj->dma_addr, GFP_KERNEL,
39 &rk_obj->dma_attrs); 39 &rk_obj->dma_attrs);
40 if (!rk_obj->kvaddr) { 40 if (!rk_obj->kvaddr) {
41 DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size); 41 DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
42 return -ENOMEM; 42 return -ENOMEM;
43 } 43 }
44 44
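
Both rockchip format-string fixes are the usual size_t rule: GEM object sizes are size_t and must be printed with %zu; %d (or %#x) with a size_t argument is wrong on 64-bit and trips -Wformat. For example:

    size_t size = obj->size;
    DRM_ERROR("failed to allocate %zu byte dma buffer", size);
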
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 1c4d5b5a70a2..8cd840f602b7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -98,7 +98,9 @@ struct vop_win {
98 const struct vop_win_data *data; 98 const struct vop_win_data *data;
99 struct vop *vop; 99 struct vop *vop;
100 100
101 struct vop_plane_state state; 101 /* protected by dev->event_lock */
102 bool enable;
103 dma_addr_t yrgb_mst;
102}; 104};
103 105
104struct vop { 106struct vop {
@@ -112,6 +114,8 @@ struct vop {
112 bool vsync_work_pending; 114 bool vsync_work_pending;
113 struct completion dsp_hold_completion; 115 struct completion dsp_hold_completion;
114 struct completion wait_update_complete; 116 struct completion wait_update_complete;
117
118 /* protected by dev->event_lock */
115 struct drm_pending_vblank_event *event; 119 struct drm_pending_vblank_event *event;
116 120
117 const struct vop_data *data; 121 const struct vop_data *data;
@@ -431,9 +435,6 @@ static void vop_enable(struct drm_crtc *crtc)
431 struct vop *vop = to_vop(crtc); 435 struct vop *vop = to_vop(crtc);
432 int ret; 436 int ret;
433 437
434 if (vop->is_enabled)
435 return;
436
437 ret = pm_runtime_get_sync(vop->dev); 438 ret = pm_runtime_get_sync(vop->dev);
438 if (ret < 0) { 439 if (ret < 0) {
439 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); 440 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
@@ -501,8 +502,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
501 struct vop *vop = to_vop(crtc); 502 struct vop *vop = to_vop(crtc);
502 int i; 503 int i;
503 504
504 if (!vop->is_enabled) 505 WARN_ON(vop->event);
505 return;
506 506
507 /* 507 /*
508 * We need to make sure that all windows are disabled before we 508 * We need to make sure that all windows are disabled before we
@@ -553,6 +553,14 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
553 clk_disable(vop->aclk); 553 clk_disable(vop->aclk);
554 clk_disable(vop->hclk); 554 clk_disable(vop->hclk);
555 pm_runtime_put(vop->dev); 555 pm_runtime_put(vop->dev);
556
557 if (crtc->state->event && !crtc->state->active) {
558 spin_lock_irq(&crtc->dev->event_lock);
559 drm_crtc_send_vblank_event(crtc, crtc->state->event);
560 spin_unlock_irq(&crtc->dev->event_lock);
561
562 crtc->state->event = NULL;
563 }
556} 564}
557 565
558static void vop_plane_destroy(struct drm_plane *plane) 566static void vop_plane_destroy(struct drm_plane *plane)
@@ -658,6 +666,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
658 if (!old_state->crtc) 666 if (!old_state->crtc)
659 return; 667 return;
660 668
669 spin_lock_irq(&plane->dev->event_lock);
670 vop_win->enable = false;
671 vop_win->yrgb_mst = 0;
672 spin_unlock_irq(&plane->dev->event_lock);
673
661 spin_lock(&vop->reg_lock); 674 spin_lock(&vop->reg_lock);
662 675
663 VOP_WIN_SET(vop, win, enable, 0); 676 VOP_WIN_SET(vop, win, enable, 0);
@@ -692,7 +705,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
692 /* 705 /*
693 * can't update plane when vop is disabled. 706 * can't update plane when vop is disabled.
694 */ 707 */
695 if (!crtc) 708 if (WARN_ON(!crtc))
696 return; 709 return;
697 710
698 if (WARN_ON(!vop->is_enabled)) 711 if (WARN_ON(!vop->is_enabled))
@@ -721,6 +734,11 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
721 offset += (src->y1 >> 16) * fb->pitches[0]; 734 offset += (src->y1 >> 16) * fb->pitches[0];
722 vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0]; 735 vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
723 736
737 spin_lock_irq(&plane->dev->event_lock);
738 vop_win->enable = true;
739 vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
740 spin_unlock_irq(&plane->dev->event_lock);
741
724 spin_lock(&vop->reg_lock); 742 spin_lock(&vop->reg_lock);
725 743
726 VOP_WIN_SET(vop, win, format, vop_plane_state->format); 744 VOP_WIN_SET(vop, win, format, vop_plane_state->format);
@@ -876,30 +894,10 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
876 WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); 894 WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
877} 895}
878 896
879static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
880 struct drm_file *file_priv)
881{
882 struct drm_device *drm = crtc->dev;
883 struct vop *vop = to_vop(crtc);
884 struct drm_pending_vblank_event *e;
885 unsigned long flags;
886
887 spin_lock_irqsave(&drm->event_lock, flags);
888 e = vop->event;
889 if (e && e->base.file_priv == file_priv) {
890 vop->event = NULL;
891
892 e->base.destroy(&e->base);
893 file_priv->event_space += sizeof(e->event);
894 }
895 spin_unlock_irqrestore(&drm->event_lock, flags);
896}
897
898static const struct rockchip_crtc_funcs private_crtc_funcs = { 897static const struct rockchip_crtc_funcs private_crtc_funcs = {
899 .enable_vblank = vop_crtc_enable_vblank, 898 .enable_vblank = vop_crtc_enable_vblank,
900 .disable_vblank = vop_crtc_disable_vblank, 899 .disable_vblank = vop_crtc_disable_vblank,
901 .wait_for_update = vop_crtc_wait_for_update, 900 .wait_for_update = vop_crtc_wait_for_update,
902 .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
903}; 901};
904 902
905static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, 903static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -931,6 +929,8 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
931 u16 vact_end = vact_st + vdisplay; 929 u16 vact_end = vact_st + vdisplay;
932 uint32_t val; 930 uint32_t val;
933 931
932 WARN_ON(vop->event);
933
934 vop_enable(crtc); 934 vop_enable(crtc);
935 /* 935 /*
936 * If dclk rate is zero, mean that scanout is stop, 936 * If dclk rate is zero, mean that scanout is stop,
@@ -1027,12 +1027,15 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
1027{ 1027{
1028 struct vop *vop = to_vop(crtc); 1028 struct vop *vop = to_vop(crtc);
1029 1029
1030 spin_lock_irq(&crtc->dev->event_lock);
1030 if (crtc->state->event) { 1031 if (crtc->state->event) {
1031 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 1032 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
1033 WARN_ON(vop->event);
1032 1034
1033 vop->event = crtc->state->event; 1035 vop->event = crtc->state->event;
1034 crtc->state->event = NULL; 1036 crtc->state->event = NULL;
1035 } 1037 }
1038 spin_unlock_irq(&crtc->dev->event_lock);
1036} 1039}
1037 1040
1038static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { 1041static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
@@ -1080,16 +1083,14 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
1080 1083
1081static bool vop_win_pending_is_complete(struct vop_win *vop_win) 1084static bool vop_win_pending_is_complete(struct vop_win *vop_win)
1082{ 1085{
1083 struct drm_plane *plane = &vop_win->base;
1084 struct vop_plane_state *state = to_vop_plane_state(plane->state);
1085 dma_addr_t yrgb_mst; 1086 dma_addr_t yrgb_mst;
1086 1087
1087 if (!state->enable) 1088 if (!vop_win->enable)
1088 return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0; 1089 return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
1089 1090
1090 yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data); 1091 yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
1091 1092
1092 return yrgb_mst == state->yrgb_mst; 1093 return yrgb_mst == vop_win->yrgb_mst;
1093} 1094}
1094 1095
1095static void vop_handle_vblank(struct vop *vop) 1096static void vop_handle_vblank(struct vop *vop)
@@ -1104,15 +1105,16 @@ static void vop_handle_vblank(struct vop *vop)
1104 return; 1105 return;
1105 } 1106 }
1106 1107
1108 spin_lock_irqsave(&drm->event_lock, flags);
1107 if (vop->event) { 1109 if (vop->event) {
1108 spin_lock_irqsave(&drm->event_lock, flags);
1109 1110
1110 drm_crtc_send_vblank_event(crtc, vop->event); 1111 drm_crtc_send_vblank_event(crtc, vop->event);
1111 drm_crtc_vblank_put(crtc); 1112 drm_crtc_vblank_put(crtc);
1112 vop->event = NULL; 1113 vop->event = NULL;
1113 1114
1114 spin_unlock_irqrestore(&drm->event_lock, flags);
1115 } 1115 }
1116 spin_unlock_irqrestore(&drm->event_lock, flags);
1117
1116 if (!completion_done(&vop->wait_update_complete)) 1118 if (!completion_done(&vop->wait_update_complete))
1117 complete(&vop->wait_update_complete); 1119 complete(&vop->wait_update_complete);
1118} 1120}
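
The vop hunks move every access to the pending event and to the per-window shadow state (enable, yrgb_mst) under dev->event_lock, so the IRQ-side completion check reads driver-owned fields instead of plane->state, which the atomic core may be swapping concurrently. The hand-off in atomic_begin follows this sketch (to_example_crtc() stands in for the driver's container_of helper):

    static void example_crtc_atomic_begin(struct drm_crtc *crtc,
                                          struct drm_crtc_state *old_state)
    {
            struct example_crtc *ec = to_example_crtc(crtc); /* hypothetical */

            spin_lock_irq(&crtc->dev->event_lock);
            if (crtc->state->event) {
                    WARN_ON(drm_crtc_vblank_get(crtc) != 0);
                    WARN_ON(ec->event); /* at most one event in flight */
                    ec->event = crtc->state->event;
                    crtc->state->event = NULL;
            }
            spin_unlock_irq(&crtc->dev->event_lock);
    }
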
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 1e154fc779d5..6547b1db460a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -441,7 +441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
441 scrtc->event = NULL; 441 scrtc->event = NULL;
442 if (event) { 442 if (event) {
443 drm_crtc_send_vblank_event(&scrtc->crtc, event); 443 drm_crtc_send_vblank_event(&scrtc->crtc, event);
444 drm_vblank_put(dev, 0); 444 drm_crtc_vblank_put(&scrtc->crtc);
445 } 445 }
446 spin_unlock_irqrestore(&dev->event_lock, flags); 446 spin_unlock_irqrestore(&dev->event_lock, flags);
447} 447}
@@ -467,7 +467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
467 467
468 if (event) { 468 if (event) {
469 event->pipe = 0; 469 event->pipe = 0;
470 drm_vblank_get(dev, 0); 470 drm_crtc_vblank_get(&scrtc->crtc);
471 spin_lock_irqsave(&dev->event_lock, flags); 471 spin_lock_irqsave(&dev->event_lock, flags);
472 scrtc->event = event; 472 scrtc->event = event;
473 spin_unlock_irqrestore(&dev->event_lock, flags); 473 spin_unlock_irqrestore(&dev->event_lock, flags);
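
shmobile swaps the legacy pipe-indexed calls for the per-CRTC API: drm_crtc_vblank_get/put(&scrtc->crtc) derive the pipe from the CRTC rather than hard-coding 0, and pair naturally with drm_crtc_send_vblank_event(). Balanced use (sketch, compressing the get at flip time and the put at completion into one place):

    if (drm_crtc_vblank_get(crtc) == 0) { /* reference taken */
            drm_crtc_send_vblank_event(crtc, event);
            drm_crtc_vblank_put(crtc);    /* dropped after delivery */
    }
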
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 7700ff172079..ee79264b5b6a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -264,7 +264,7 @@ static struct drm_driver shmob_drm_driver = {
264 .get_vblank_counter = drm_vblank_no_hw_counter, 264 .get_vblank_counter = drm_vblank_no_hw_counter,
265 .enable_vblank = shmob_drm_enable_vblank, 265 .enable_vblank = shmob_drm_enable_vblank,
266 .disable_vblank = shmob_drm_disable_vblank, 266 .disable_vblank = shmob_drm_disable_vblank,
267 .gem_free_object = drm_gem_cma_free_object, 267 .gem_free_object_unlocked = drm_gem_cma_free_object,
268 .gem_vm_ops = &drm_gem_cma_vm_ops, 268 .gem_vm_ops = &drm_gem_cma_vm_ops,
269 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 269 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
270 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 270 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
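
.gem_free_object_unlocked is the recurring change across the remaining drivers (shmobile, sti, sun4i): the callback runs without dev->struct_mutex held, which is safe for CMA-backed objects and lets the core stop serializing frees on that lock. Table shape (sketch):

    static struct drm_driver example_driver = {
            .driver_features          = DRIVER_GEM | DRIVER_MODESET,
            /* invoked without dev->struct_mutex held */
            .gem_free_object_unlocked = drm_gem_cma_free_object,
            .gem_vm_ops               = &drm_gem_cma_vm_ops,
    };
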
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 505620c7c2c8..e04deedabd4a 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -51,15 +51,6 @@ static void sti_crtc_disabling(struct drm_crtc *crtc)
51 mixer->status = STI_MIXER_DISABLING; 51 mixer->status = STI_MIXER_DISABLING;
52} 52}
53 53
54static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
55 const struct drm_display_mode *mode,
56 struct drm_display_mode *adjusted_mode)
57{
58 /* accept the provided drm_display_mode, do not fix it up */
59 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
60 return true;
61}
62
63static int 54static int
64sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) 55sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
65{ 56{
@@ -230,7 +221,6 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
230static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { 221static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
231 .enable = sti_crtc_enable, 222 .enable = sti_crtc_enable,
232 .disable = sti_crtc_disabling, 223 .disable = sti_crtc_disabling,
233 .mode_fixup = sti_crtc_mode_fixup,
234 .mode_set = drm_helper_crtc_mode_set, 224 .mode_set = drm_helper_crtc_mode_set,
235 .mode_set_nofb = sti_crtc_mode_set_nofb, 225 .mode_set_nofb = sti_crtc_mode_set_nofb,
236 .mode_set_base = drm_helper_crtc_mode_set_base, 226 .mode_set_base = drm_helper_crtc_mode_set_base,
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 4e990299735c..53aa0029295b 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
105{ 105{
106 struct drm_info_node *node = s->private; 106 struct drm_info_node *node = s->private;
107 struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data; 107 struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
108 struct drm_device *dev = node->minor->dev;
109 int ret;
110
111 ret = mutex_lock_interruptible(&dev->struct_mutex);
112 if (ret)
113 return ret;
114 108
115 seq_printf(s, "%s: (vaddr = 0x%p)", 109 seq_printf(s, "%s: (vaddr = 0x%p)",
116 sti_plane_to_str(&cursor->plane), cursor->regs); 110 sti_plane_to_str(&cursor->plane), cursor->regs);
@@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
129 DBGFS_DUMP(CUR_AWE); 123 DBGFS_DUMP(CUR_AWE);
130 seq_puts(s, "\n"); 124 seq_puts(s, "\n");
131 125
132 mutex_unlock(&dev->struct_mutex);
133 return 0; 126 return 0;
134} 127}
135 128
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 872495e72294..dd2c400c4a46 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
72 struct drm_info_node *node = s->private; 72 struct drm_info_node *node = s->private;
73 struct drm_device *dev = node->minor->dev; 73 struct drm_device *dev = node->minor->dev;
74 struct drm_plane *p; 74 struct drm_plane *p;
75 int ret;
76
77 ret = mutex_lock_interruptible(&dev->struct_mutex);
78 if (ret)
79 return ret;
80 75
81 list_for_each_entry(p, &dev->mode_config.plane_list, head) { 76 list_for_each_entry(p, &dev->mode_config.plane_list, head) {
82 struct sti_plane *plane = to_sti_plane(p); 77 struct sti_plane *plane = to_sti_plane(p);
@@ -86,7 +81,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
86 plane->fps_info.fips_str); 81 plane->fps_info.fips_str);
87 } 82 }
88 83
89 mutex_unlock(&dev->struct_mutex);
90 return 0; 84 return 0;
91} 85}
92 86
@@ -221,7 +215,7 @@ static int sti_atomic_commit(struct drm_device *drm,
221 * the software side now. 215 * the software side now.
222 */ 216 */
223 217
224 drm_atomic_helper_swap_state(drm, state); 218 drm_atomic_helper_swap_state(state, true);
225 219
226 if (nonblock) 220 if (nonblock)
227 sti_atomic_schedule(private, state); 221 sti_atomic_schedule(private, state);
@@ -310,7 +304,7 @@ static struct drm_driver sti_driver = {
310 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | 304 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
311 DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, 305 DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
312 .load = sti_load, 306 .load = sti_load,
313 .gem_free_object = drm_gem_cma_free_object, 307 .gem_free_object_unlocked = drm_gem_cma_free_object,
314 .gem_vm_ops = &drm_gem_cma_vm_ops, 308 .gem_vm_ops = &drm_gem_cma_vm_ops,
315 .dumb_create = drm_gem_cma_dumb_create, 309 .dumb_create = drm_gem_cma_dumb_create,
316 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 310 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 25f76632002c..e2901667eceb 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
177{ 177{
178 struct drm_info_node *node = s->private; 178 struct drm_info_node *node = s->private;
179 struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data; 179 struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
180 struct drm_device *dev = node->minor->dev;
181 int ret;
182
183 ret = mutex_lock_interruptible(&dev->struct_mutex);
184 if (ret)
185 return ret;
186 180
187 seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs); 181 seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
188 DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL); 182 DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
@@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
193 dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I); 187 dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
194 seq_puts(s, "\n"); 188 seq_puts(s, "\n");
195 189
196 mutex_unlock(&dev->struct_mutex);
197 return 0; 190 return 0;
198} 191}
199 192
@@ -384,20 +377,10 @@ static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
384 return MODE_OK; 377 return MODE_OK;
385} 378}
386 379
387struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector)
388{
389 struct sti_dvo_connector *dvo_connector
390 = to_sti_dvo_connector(connector);
391
392 /* Best encoder is the one associated during connector creation */
393 return dvo_connector->encoder;
394}
395
396static const 380static const
397struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = { 381struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = {
398 .get_modes = sti_dvo_connector_get_modes, 382 .get_modes = sti_dvo_connector_get_modes,
399 .mode_valid = sti_dvo_connector_mode_valid, 383 .mode_valid = sti_dvo_connector_mode_valid,
400 .best_encoder = sti_dvo_best_encoder,
401}; 384};
402 385
403static enum drm_connector_status 386static enum drm_connector_status
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index ff33c38da197..fdf69b5a041b 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
208{ 208{
209 struct drm_info_node *node = s->private; 209 struct drm_info_node *node = s->private;
210 struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data; 210 struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
211 struct drm_device *dev = node->minor->dev;
212 struct drm_plane *drm_plane = &gdp->plane.drm_plane; 211 struct drm_plane *drm_plane = &gdp->plane.drm_plane;
213 struct drm_crtc *crtc = drm_plane->crtc; 212 struct drm_crtc *crtc = drm_plane->crtc;
214 int ret;
215
216 ret = mutex_lock_interruptible(&dev->struct_mutex);
217 if (ret)
218 return ret;
219 213
220 seq_printf(s, "%s: (vaddr = 0x%p)", 214 seq_printf(s, "%s: (vaddr = 0x%p)",
221 sti_plane_to_str(&gdp->plane), gdp->regs); 215 sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
248 seq_printf(s, " Connected to DRM CRTC #%d (%s)\n", 242 seq_printf(s, " Connected to DRM CRTC #%d (%s)\n",
249 crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc))); 243 crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
250 244
251 mutex_unlock(&dev->struct_mutex);
252 return 0; 245 return 0;
253} 246}
254 247
@@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
279{ 272{
280 struct drm_info_node *node = s->private; 273 struct drm_info_node *node = s->private;
281 struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data; 274 struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
282 struct drm_device *dev = node->minor->dev;
283 unsigned int b; 275 unsigned int b;
284 int ret;
285
286 ret = mutex_lock_interruptible(&dev->struct_mutex);
287 if (ret)
288 return ret;
289 276
290 for (b = 0; b < GDP_NODE_NB_BANK; b++) { 277 for (b = 0; b < GDP_NODE_NB_BANK; b++) {
291 seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b); 278 seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
@@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
294 gdp_node_dump_node(s, gdp->node_list[b].btm_field); 281 gdp_node_dump_node(s, gdp->node_list[b].btm_field);
295 } 282 }
296 283
297 mutex_unlock(&dev->struct_mutex);
298 return 0; 284 return 0;
299} 285}
300 286
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f7d3464cdf09..dcec5a8eda59 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
376{ 376{
377 struct drm_info_node *node = s->private; 377 struct drm_info_node *node = s->private;
378 struct sti_hda *hda = (struct sti_hda *)node->info_ent->data; 378 struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
379 struct drm_device *dev = node->minor->dev;
380 int ret;
381
382 ret = mutex_lock_interruptible(&dev->struct_mutex);
383 if (ret)
384 return ret;
385 379
386 seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs); 380 seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
387 DBGFS_DUMP(HDA_ANA_CFG); 381 DBGFS_DUMP(HDA_ANA_CFG);
@@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
397 hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl); 391 hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
398 seq_puts(s, "\n"); 392 seq_puts(s, "\n");
399 393
400 mutex_unlock(&dev->struct_mutex);
401 return 0; 394 return 0;
402} 395}
403 396
@@ -676,20 +669,10 @@ static int sti_hda_connector_mode_valid(struct drm_connector *connector,
676 return MODE_OK; 669 return MODE_OK;
677} 670}
678 671
679struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector)
680{
681 struct sti_hda_connector *hda_connector
682 = to_sti_hda_connector(connector);
683
684 /* Best encoder is the one associated during connector creation */
685 return hda_connector->encoder;
686}
687
688static const 672static const
689struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = { 673struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
690 .get_modes = sti_hda_connector_get_modes, 674 .get_modes = sti_hda_connector_get_modes,
691 .mode_valid = sti_hda_connector_mode_valid, 675 .mode_valid = sti_hda_connector_mode_valid,
692 .best_encoder = sti_hda_best_encoder,
693}; 676};
694 677
695static enum drm_connector_status 678static enum drm_connector_status
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 6ef0715bd5b9..36d9d6635784 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -628,12 +628,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
628{ 628{
629 struct drm_info_node *node = s->private; 629 struct drm_info_node *node = s->private;
630 struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data; 630 struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
631 struct drm_device *dev = node->minor->dev;
632 int ret;
633
634 ret = mutex_lock_interruptible(&dev->struct_mutex);
635 if (ret)
636 return ret;
637 631
638 seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs); 632 seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
639 DBGFS_DUMP("\n", HDMI_CFG); 633 DBGFS_DUMP("\n", HDMI_CFG);
@@ -690,7 +684,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
690 DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR); 684 DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
691 seq_puts(s, "\n"); 685 seq_puts(s, "\n");
692 686
693 mutex_unlock(&dev->struct_mutex);
694 return 0; 687 return 0;
695} 688}
696 689
@@ -897,20 +890,10 @@ static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
897 return MODE_OK; 890 return MODE_OK;
898} 891}
899 892
900struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector)
901{
902 struct sti_hdmi_connector *hdmi_connector
903 = to_sti_hdmi_connector(connector);
904
905 /* Best encoder is the one associated during connector creation */
906 return hdmi_connector->encoder;
907}
908
909static const 893static const
910struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = { 894struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
911 .get_modes = sti_hdmi_connector_get_modes, 895 .get_modes = sti_hdmi_connector_get_modes,
912 .mode_valid = sti_hdmi_connector_mode_valid, 896 .mode_valid = sti_hdmi_connector_mode_valid,
913 .best_encoder = sti_hdmi_best_encoder,
914}; 897};
915 898
916/* get detection status of display device */ 899/* get detection status of display device */
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 1edec29b9e45..1c06a50fddca 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
555{ 555{
556 struct drm_info_node *node = s->private; 556 struct drm_info_node *node = s->private;
557 struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data; 557 struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
558 struct drm_device *dev = node->minor->dev;
559 int cmd, cmd_offset, infoxp70; 558 int cmd, cmd_offset, infoxp70;
560 void *virt; 559 void *virt;
561 int ret;
562
563 ret = mutex_lock_interruptible(&dev->struct_mutex);
564 if (ret)
565 return ret;
566 560
567 seq_printf(s, "%s: (vaddr = 0x%p)", 561 seq_printf(s, "%s: (vaddr = 0x%p)",
568 sti_plane_to_str(&hqvdp->plane), hqvdp->regs); 562 sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
@@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
630 624
631 seq_puts(s, "\n"); 625 seq_puts(s, "\n");
632 626
633 mutex_unlock(&dev->struct_mutex);
634 return 0; 627 return 0;
635} 628}
636 629
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index aed7801b51f7..6f86f2b2b6a5 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
151{ 151{
152 struct drm_info_node *node = s->private; 152 struct drm_info_node *node = s->private;
153 struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data; 153 struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
154 struct drm_device *dev = node->minor->dev;
155 int ret;
156
157 ret = mutex_lock_interruptible(&dev->struct_mutex);
158 if (ret)
159 return ret;
160 154
161 seq_printf(s, "%s: (vaddr = 0x%p)", 155 seq_printf(s, "%s: (vaddr = 0x%p)",
162 sti_mixer_to_str(mixer), mixer->regs); 156 sti_mixer_to_str(mixer), mixer->regs);
@@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
176 mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0); 170 mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
177 seq_puts(s, "\n"); 171 seq_puts(s, "\n");
178 172
179 mutex_unlock(&dev->struct_mutex);
180 return 0; 173 return 0;
181} 174}
182 175
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index f983db5a59da..60fe0afa5644 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -515,13 +515,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
515{ 515{
516 struct drm_info_node *node = s->private; 516 struct drm_info_node *node = s->private;
517 struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data; 517 struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
518 struct drm_device *dev = node->minor->dev;
519 struct drm_crtc *crtc; 518 struct drm_crtc *crtc;
520 int ret;
521
522 ret = mutex_lock_interruptible(&dev->struct_mutex);
523 if (ret)
524 return ret;
525 519
526 seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs); 520 seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
527 521
@@ -587,7 +581,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
587 DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT); 581 DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
588 seq_puts(s, "\n"); 582 seq_puts(s, "\n");
589 583
590 mutex_unlock(&dev->struct_mutex);
591 return 0; 584 return 0;
592} 585}
593 586
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 523ed19f5ac6..0132aaebe598 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
92{ 92{
93 struct drm_info_node *node = s->private; 93 struct drm_info_node *node = s->private;
94 struct sti_vid *vid = (struct sti_vid *)node->info_ent->data; 94 struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
95 struct drm_device *dev = node->minor->dev;
96 int ret;
97
98 ret = mutex_lock_interruptible(&dev->struct_mutex);
99 if (ret)
100 return ret;
101 95
102 seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs); 96 seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
103 97
@@ -122,7 +116,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
122 DBGFS_DUMP(VID_CSAT); 116 DBGFS_DUMP(VID_CSAT);
123 seq_puts(s, "\n"); 117 seq_puts(s, "\n");
124 118
125 mutex_unlock(&dev->struct_mutex);
126 return 0; 119 return 0;
127} 120}
128 121
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 4182a21f5923..f628b6d8f23f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -51,10 +51,22 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
51{ 51{
52 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 52 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
53 struct sun4i_drv *drv = scrtc->drv; 53 struct sun4i_drv *drv = scrtc->drv;
54 struct drm_pending_vblank_event *event = crtc->state->event;
54 55
55 DRM_DEBUG_DRIVER("Committing plane changes\n"); 56 DRM_DEBUG_DRIVER("Committing plane changes\n");
56 57
57 sun4i_backend_commit(drv->backend); 58 sun4i_backend_commit(drv->backend);
59
60 if (event) {
61 crtc->state->event = NULL;
62
63 spin_lock_irq(&crtc->dev->event_lock);
64 if (drm_crtc_vblank_get(crtc) == 0)
65 drm_crtc_arm_vblank_event(crtc, event);
66 else
67 drm_crtc_send_vblank_event(crtc, event);
68 spin_unlock_irq(&crtc->dev->event_lock);
69 }
58} 70}
59 71
60static void sun4i_crtc_disable(struct drm_crtc *crtc) 72static void sun4i_crtc_disable(struct drm_crtc *crtc)
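
sun4i's flush hook now completes page-flip events itself: under event_lock it tries to take a vblank reference and, on success, arms the event so the next vblank interrupt delivers it (drm_crtc_arm_vblank_event() drops the reference when it fires); if no reference can be taken because the CRTC is off, the event is sent immediately so userspace never waits forever. Condensed:

    spin_lock_irq(&crtc->dev->event_lock);
    if (drm_crtc_vblank_get(crtc) == 0)
            drm_crtc_arm_vblank_event(crtc, event);  /* fires on next vblank */
    else
            drm_crtc_send_vblank_event(crtc, event); /* CRTC off: complete now */
    spin_unlock_irq(&crtc->dev->event_lock);
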
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 76e922bb60e5..68e9d85085fb 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -103,7 +103,7 @@ static struct drm_driver sun4i_drv_driver = {
103 .dumb_create = drm_gem_cma_dumb_create, 103 .dumb_create = drm_gem_cma_dumb_create,
104 .dumb_destroy = drm_gem_dumb_destroy, 104 .dumb_destroy = drm_gem_dumb_destroy,
105 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 105 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
106 .gem_free_object = drm_gem_cma_free_object, 106 .gem_free_object_unlocked = drm_gem_cma_free_object,
107 .gem_vm_ops = &drm_gem_cma_vm_ops, 107 .gem_vm_ops = &drm_gem_cma_vm_ops,
108 108
109 /* PRIME Operations */ 109 /* PRIME Operations */
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index ab6494818050..442cfe271688 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -90,19 +90,9 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
90 return MODE_OK; 90 return MODE_OK;
91} 91}
92 92
93static struct drm_encoder *
94sun4i_rgb_best_encoder(struct drm_connector *connector)
95{
96 struct sun4i_rgb *rgb =
97 drm_connector_to_sun4i_rgb(connector);
98
99 return &rgb->encoder;
100}
101
102static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = { 93static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
103 .get_modes = sun4i_rgb_get_modes, 94 .get_modes = sun4i_rgb_get_modes,
104 .mode_valid = sun4i_rgb_mode_valid, 95 .mode_valid = sun4i_rgb_mode_valid,
105 .best_encoder = sun4i_rgb_best_encoder,
106}; 96};
107 97
108static enum drm_connector_status 98static enum drm_connector_status
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index bc047f923508..b84147896294 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -526,18 +526,9 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
526 return MODE_OK; 526 return MODE_OK;
527} 527}
528 528
529static struct drm_encoder *
530sun4i_tv_comp_best_encoder(struct drm_connector *connector)
531{
532 struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector);
533
534 return &tv->encoder;
535}
536
537static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = { 529static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
538 .get_modes = sun4i_tv_comp_get_modes, 530 .get_modes = sun4i_tv_comp_get_modes,
539 .mode_valid = sun4i_tv_comp_mode_valid, 531 .mode_valid = sun4i_tv_comp_mode_valid,
540 .best_encoder = sun4i_tv_comp_best_encoder,
541}; 532};
542 533
543static enum drm_connector_status 534static enum drm_connector_status
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index b59c3bf0df44..a177a42a9849 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -93,7 +93,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
93 * the software side now. 93 * the software side now.
94 */ 94 */
95 95
96 drm_atomic_helper_swap_state(drm, state); 96 drm_atomic_helper_swap_state(state, true);
97 97
98 if (nonblock) 98 if (nonblock)
99 tegra_atomic_schedule(tegra, state); 99 tegra_atomic_schedule(tegra, state);
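
drm_atomic_helper_swap_state() no longer takes the (unused) drm_device; the new second argument is a stall flag, which when true makes the call wait for preceding commits to finish before the new state is swapped in. The call-site shape, sketched with foo_* placeholders:

	static int foo_atomic_commit(struct drm_device *dev,
				     struct drm_atomic_state *state,
				     bool nonblock)
	{
		int ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		/* stall = true: the safe default for drivers rolling
		 * their own nonblocking commit machinery */
		drm_atomic_helper_swap_state(state, true);

		/* ... queue (nonblock) or run the commit tail ... */
		return 0;
	}
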
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index f52d6cb24ff5..0ddcce1b420d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -239,8 +239,6 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
239void tegra_output_exit(struct tegra_output *output); 239void tegra_output_exit(struct tegra_output *output);
240 240
241int tegra_output_connector_get_modes(struct drm_connector *connector); 241int tegra_output_connector_get_modes(struct drm_connector *connector);
242struct drm_encoder *
243tegra_output_connector_best_encoder(struct drm_connector *connector);
244enum drm_connector_status 242enum drm_connector_status
245tegra_output_connector_detect(struct drm_connector *connector, bool force); 243tegra_output_connector_detect(struct drm_connector *connector, bool force);
246void tegra_output_connector_destroy(struct drm_connector *connector); 244void tegra_output_connector_destroy(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index d1239ebc190f..099cccb2fbcb 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -794,7 +794,6 @@ tegra_dsi_connector_mode_valid(struct drm_connector *connector,
794static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = { 794static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
795 .get_modes = tegra_output_connector_get_modes, 795 .get_modes = tegra_output_connector_get_modes,
796 .mode_valid = tegra_dsi_connector_mode_valid, 796 .mode_valid = tegra_dsi_connector_mode_valid,
797 .best_encoder = tegra_output_connector_best_encoder,
798}; 797};
799 798
800static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { 799static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index b7ef4929e347..2fdb8796443e 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -806,7 +806,6 @@ static const struct drm_connector_helper_funcs
806tegra_hdmi_connector_helper_funcs = { 806tegra_hdmi_connector_helper_funcs = {
807 .get_modes = tegra_output_connector_get_modes, 807 .get_modes = tegra_output_connector_get_modes,
808 .mode_valid = tegra_hdmi_connector_mode_valid, 808 .mode_valid = tegra_hdmi_connector_mode_valid,
809 .best_encoder = tegra_output_connector_best_encoder,
810}; 809};
811 810
812static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { 811static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 46664b622270..1480f6aaffe4 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -42,14 +42,6 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
42 return err; 42 return err;
43} 43}
44 44
45struct drm_encoder *
46tegra_output_connector_best_encoder(struct drm_connector *connector)
47{
48 struct tegra_output *output = connector_to_output(connector);
49
50 return &output->encoder;
51}
52
53enum drm_connector_status 45enum drm_connector_status
54tegra_output_connector_detect(struct drm_connector *connector, bool force) 46tegra_output_connector_detect(struct drm_connector *connector, bool force)
55{ 47{
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index e246334e0252..a131b44e2d6f 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -112,7 +112,6 @@ tegra_rgb_connector_mode_valid(struct drm_connector *connector,
112static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { 112static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = {
113 .get_modes = tegra_output_connector_get_modes, 113 .get_modes = tegra_output_connector_get_modes,
114 .mode_valid = tegra_rgb_connector_mode_valid, 114 .mode_valid = tegra_rgb_connector_mode_valid,
115 .best_encoder = tegra_output_connector_best_encoder,
116}; 115};
117 116
118static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { 117static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 757c6e8603af..34958d71284b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -1087,7 +1087,6 @@ tegra_sor_connector_mode_valid(struct drm_connector *connector,
1087static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { 1087static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = {
1088 .get_modes = tegra_sor_connector_get_modes, 1088 .get_modes = tegra_sor_connector_get_modes,
1089 .mode_valid = tegra_sor_connector_mode_valid, 1089 .mode_valid = tegra_sor_connector_mode_valid,
1090 .best_encoder = tegra_output_connector_best_encoder,
1091}; 1090};
1092 1091
1093static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { 1092static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 709bc903524d..308e197908fc 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -549,7 +549,7 @@ static struct drm_driver tilcdc_driver = {
549 .get_vblank_counter = drm_vblank_no_hw_counter, 549 .get_vblank_counter = drm_vblank_no_hw_counter,
550 .enable_vblank = tilcdc_enable_vblank, 550 .enable_vblank = tilcdc_enable_vblank,
551 .disable_vblank = tilcdc_disable_vblank, 551 .disable_vblank = tilcdc_disable_vblank,
552 .gem_free_object = drm_gem_cma_free_object, 552 .gem_free_object_unlocked = drm_gem_cma_free_object,
553 .gem_vm_ops = &drm_gem_cma_vm_ops, 553 .gem_vm_ops = &drm_gem_cma_vm_ops,
554 .dumb_create = drm_gem_cma_dumb_create, 554 .dumb_create = drm_gem_cma_dumb_create,
555 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 555 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b87afee44995..f92ea9579674 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
376 376
377 spin_lock_irqsave(&dev->event_lock, flags); 377 spin_lock_irqsave(&dev->event_lock, flags);
378 if (event) 378 if (event)
379 drm_send_vblank_event(dev, 0, event); 379 drm_crtc_send_vblank_event(crtc, event);
380 spin_unlock_irqrestore(&dev->event_lock, flags); 380 spin_unlock_irqrestore(&dev->event_lock, flags);
381 crtc->primary->fb = fb; 381 crtc->primary->fb = fb;
382 382
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index e5a9d3aaf45f..59adcf8532dd 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
291 291
292/* Called on the last userspace/kernel unreference of the BO. Returns 292/* Called on the last userspace/kernel unreference of the BO. Returns
293 * it to the BO cache if possible, otherwise frees it. 293 * it to the BO cache if possible, otherwise frees it.
294 *
295 * Note that this is called with the struct_mutex held.
296 */ 294 */
297void vc4_free_object(struct drm_gem_object *gem_bo) 295void vc4_free_object(struct drm_gem_object *gem_bo)
298{ 296{
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 904d0754ad78..4c0f26a644a3 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -175,20 +175,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
175 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); 175 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
176} 176}
177 177
178static void 178static int
179vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 179vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
180 uint32_t start, uint32_t size) 180 uint32_t size)
181{ 181{
182 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 182 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
183 u32 i; 183 u32 i;
184 184
185 for (i = start; i < start + size; i++) { 185 for (i = 0; i < size; i++) {
186 vc4_crtc->lut_r[i] = r[i] >> 8; 186 vc4_crtc->lut_r[i] = r[i] >> 8;
187 vc4_crtc->lut_g[i] = g[i] >> 8; 187 vc4_crtc->lut_g[i] = g[i] >> 8;
188 vc4_crtc->lut_b[i] = b[i] >> 8; 188 vc4_crtc->lut_b[i] = b[i] >> 8;
189 } 189 }
190 190
191 vc4_crtc_lut_load(crtc); 191 vc4_crtc_lut_load(crtc);
192
193 return 0;
192} 194}
193 195
194static u32 vc4_get_fifo_full_level(u32 format) 196static u32 vc4_get_fifo_full_level(u32 format)
@@ -395,6 +397,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
395 struct vc4_dev *vc4 = to_vc4_dev(dev); 397 struct vc4_dev *vc4 = to_vc4_dev(dev);
396 struct drm_plane *plane; 398 struct drm_plane *plane;
397 unsigned long flags; 399 unsigned long flags;
400 const struct drm_plane_state *plane_state;
398 u32 dlist_count = 0; 401 u32 dlist_count = 0;
399 int ret; 402 int ret;
400 403
@@ -404,18 +407,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
404 if (hweight32(state->connector_mask) > 1) 407 if (hweight32(state->connector_mask) > 1)
405 return -EINVAL; 408 return -EINVAL;
406 409
407 drm_atomic_crtc_state_for_each_plane(plane, state) { 410 drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
408 struct drm_plane_state *plane_state =
409 state->state->plane_states[drm_plane_index(plane)];
410
411 /* plane might not have changed, in which case take
412 * current state:
413 */
414 if (!plane_state)
415 plane_state = plane->state;
416
417 dlist_count += vc4_plane_dlist_size(plane_state); 411 dlist_count += vc4_plane_dlist_size(plane_state);
418 }
419 412
420 dlist_count++; /* Account for SCALER_CTL0_END. */ 413 dlist_count++; /* Account for SCALER_CTL0_END. */
421 414
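
drm_atomic_crtc_state_for_each_plane_state() packages exactly the lookup-and-fallback logic deleted above: for each plane on the CRTC it yields the plane state carried by this update, or the plane's current state when the commit does not touch that plane. The rewritten loop, condensed into a placeholder helper:

	u32 foo_plane_dlist_size(const struct drm_plane_state *state);

	static u32 foo_count_dlist(struct drm_crtc_state *state)
	{
		struct drm_plane *plane;
		const struct drm_plane_state *plane_state;
		u32 dlist_count = 0;

		drm_atomic_crtc_state_for_each_plane_state(plane, plane_state,
							   state)
			dlist_count += foo_plane_dlist_size(plane_state);

		return dlist_count + 1;	/* SCALER_CTL0_END terminator */
	}
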
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 9817dbfa4ac3..dba1114297e4 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -208,14 +208,6 @@ static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
208 return 0; 208 return 0;
209} 209}
210 210
211static struct drm_encoder *
212vc4_dpi_connector_best_encoder(struct drm_connector *connector)
213{
214 struct vc4_dpi_connector *dpi_connector =
215 to_vc4_dpi_connector(connector);
216 return dpi_connector->encoder;
217}
218
219static const struct drm_connector_funcs vc4_dpi_connector_funcs = { 211static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
220 .dpms = drm_atomic_helper_connector_dpms, 212 .dpms = drm_atomic_helper_connector_dpms,
221 .detect = vc4_dpi_connector_detect, 213 .detect = vc4_dpi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
228 220
229static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = { 221static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
230 .get_modes = vc4_dpi_connector_get_modes, 222 .get_modes = vc4_dpi_connector_get_modes,
231 .best_encoder = vc4_dpi_connector_best_encoder,
232}; 223};
233 224
234static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev, 225static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 3446ece21b4a..58b8fc036332 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -99,7 +99,7 @@ static struct drm_driver vc4_drm_driver = {
99#endif 99#endif
100 100
101 .gem_create_object = vc4_create_object, 101 .gem_create_object = vc4_create_object,
102 .gem_free_object = vc4_free_object, 102 .gem_free_object_unlocked = vc4_free_object,
103 .gem_vm_ops = &drm_gem_cma_vm_ops, 103 .gem_vm_ops = &drm_gem_cma_vm_ops,
104 104
105 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 105 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 37cac59401d7..c799baabc008 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -469,7 +469,7 @@ int vc4_kms_load(struct drm_device *dev);
469struct drm_plane *vc4_plane_init(struct drm_device *dev, 469struct drm_plane *vc4_plane_init(struct drm_device *dev,
470 enum drm_plane_type type); 470 enum drm_plane_type type);
471u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); 471u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
472u32 vc4_plane_dlist_size(struct drm_plane_state *state); 472u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
473void vc4_plane_async_set_fb(struct drm_plane *plane, 473void vc4_plane_async_set_fb(struct drm_plane *plane,
474 struct drm_framebuffer *fb); 474 struct drm_framebuffer *fb);
475 475
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 46899d6de675..6155e8aca1c6 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
53{ 53{
54 unsigned int i; 54 unsigned int i;
55 55
56 mutex_lock(&dev->struct_mutex);
57 for (i = 0; i < state->user_state.bo_count; i++) 56 for (i = 0; i < state->user_state.bo_count; i++)
58 drm_gem_object_unreference(state->bo[i]); 57 drm_gem_object_unreference_unlocked(state->bo[i]);
59 mutex_unlock(&dev->struct_mutex);
60 58
61 kfree(state); 59 kfree(state);
62} 60}
@@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
687 struct vc4_dev *vc4 = to_vc4_dev(dev); 685 struct vc4_dev *vc4 = to_vc4_dev(dev);
688 unsigned i; 686 unsigned i;
689 687
690 /* Need the struct lock for drm_gem_object_unreference(). */
691 mutex_lock(&dev->struct_mutex);
692 if (exec->bo) { 688 if (exec->bo) {
693 for (i = 0; i < exec->bo_count; i++) 689 for (i = 0; i < exec->bo_count; i++)
694 drm_gem_object_unreference(&exec->bo[i]->base); 690 drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
695 kfree(exec->bo); 691 kfree(exec->bo);
696 } 692 }
697 693
@@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
699 struct vc4_bo *bo = list_first_entry(&exec->unref_list, 695 struct vc4_bo *bo = list_first_entry(&exec->unref_list,
700 struct vc4_bo, unref_head); 696 struct vc4_bo, unref_head);
701 list_del(&bo->unref_head); 697 list_del(&bo->unref_head);
702 drm_gem_object_unreference(&bo->base.base); 698 drm_gem_object_unreference_unlocked(&bo->base.base);
703 } 699 }
704 mutex_unlock(&dev->struct_mutex);
705 700
706 mutex_lock(&vc4->power_lock); 701 mutex_lock(&vc4->power_lock);
707 if (--vc4->power_refcount == 0) 702 if (--vc4->power_refcount == 0)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index fd2644d231ff..68df91c3f860 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -208,14 +208,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
208 return ret; 208 return ret;
209} 209}
210 210
211static struct drm_encoder *
212vc4_hdmi_connector_best_encoder(struct drm_connector *connector)
213{
214 struct vc4_hdmi_connector *hdmi_connector =
215 to_vc4_hdmi_connector(connector);
216 return hdmi_connector->encoder;
217}
218
219static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { 211static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
220 .dpms = drm_atomic_helper_connector_dpms, 212 .dpms = drm_atomic_helper_connector_dpms,
221 .detect = vc4_hdmi_connector_detect, 213 .detect = vc4_hdmi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
228 220
229static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = { 221static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
230 .get_modes = vc4_hdmi_connector_get_modes, 222 .get_modes = vc4_hdmi_connector_get_modes,
231 .best_encoder = vc4_hdmi_connector_best_encoder,
232}; 223};
233 224
234static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, 225static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index cb37751bc99f..8f4d5ffc32be 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -111,6 +111,8 @@ static int vc4_atomic_commit(struct drm_device *dev,
111 int i; 111 int i;
112 uint64_t wait_seqno = 0; 112 uint64_t wait_seqno = 0;
113 struct vc4_commit *c; 113 struct vc4_commit *c;
114 struct drm_plane *plane;
115 struct drm_plane_state *new_state;
114 116
115 c = commit_init(state); 117 c = commit_init(state);
116 if (!c) 118 if (!c)
@@ -130,13 +132,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
130 return ret; 132 return ret;
131 } 133 }
132 134
133 for (i = 0; i < dev->mode_config.num_total_plane; i++) { 135 for_each_plane_in_state(state, plane, new_state, i) {
134 struct drm_plane *plane = state->planes[i];
135 struct drm_plane_state *new_state = state->plane_states[i];
136
137 if (!plane)
138 continue;
139
140 if ((plane->state->fb != new_state->fb) && new_state->fb) { 136 if ((plane->state->fb != new_state->fb) && new_state->fb) {
141 struct drm_gem_cma_object *cma_bo = 137 struct drm_gem_cma_object *cma_bo =
142 drm_fb_cma_get_gem_obj(new_state->fb, 0); 138 drm_fb_cma_get_gem_obj(new_state->fb, 0);
@@ -152,7 +148,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
152 * the software side now. 148 * the software side now.
153 */ 149 */
154 150
155 drm_atomic_helper_swap_state(dev, state); 151 drm_atomic_helper_swap_state(state, true);
156 152
157 /* 153 /*
158 * Everything below can be run asynchronously without the need to grab 154 * Everything below can be run asynchronously without the need to grab
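
for_each_plane_in_state() visits only the planes this atomic state actually carries, so both the num_total_plane scan and the NULL check disappear. The new loop shape, condensed (foo_track_framebuffer is a placeholder):

	struct drm_plane *plane;
	struct drm_plane_state *new_state;
	int i;

	for_each_plane_in_state(state, plane, new_state, i) {
		/* every iteration sees a valid (plane, new_state) pair */
		if (plane->state->fb != new_state->fb && new_state->fb)
			foo_track_framebuffer(new_state->fb);
	}
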
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4037b52fde31..5d2c3d9fd17a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -690,9 +690,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
690 return vc4_state->dlist_count; 690 return vc4_state->dlist_count;
691} 691}
692 692
693u32 vc4_plane_dlist_size(struct drm_plane_state *state) 693u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
694{ 694{
695 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); 695 const struct vc4_plane_state *vc4_state =
696 container_of(state, typeof(*vc4_state), base);
696 697
697 return vc4_state->dlist_count; 698 return vc4_state->dlist_count;
698} 699}
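
The container_of() form is what lets the function accept a const state: typeof(*vc4_state) inherits the const qualifier from the variable's declaration, so the downcast stays const-correct, where a plain to_vc4_plane_state() cast helper would have discarded it. The same idiom for any const wrapper lookup, with placeholder types:

	struct foo_plane_state {
		struct drm_plane_state base;
		u32 dlist_count;
	};

	static u32 foo_plane_dlist_size(const struct drm_plane_state *state)
	{
		/* typeof(*foo_state) is 'const struct foo_plane_state',
		 * so container_of() yields a const pointer */
		const struct foo_plane_state *foo_state =
			container_of(state, typeof(*foo_state), base);

		return foo_state->dlist_count;
	}
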
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 341f9be3dde6..1b4cc8b27080 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -235,7 +235,7 @@ static const struct file_operations vgem_driver_fops = {
235 235
236static struct drm_driver vgem_driver = { 236static struct drm_driver vgem_driver = {
237 .driver_features = DRIVER_GEM, 237 .driver_features = DRIVER_GEM,
238 .gem_free_object = vgem_gem_free_object, 238 .gem_free_object_unlocked = vgem_gem_free_object,
239 .gem_vm_ops = &vgem_gem_vm_ops, 239 .gem_vm_ops = &vgem_gem_vm_ops,
240 .ioctls = vgem_ioctls, 240 .ioctls = vgem_ioctls,
241 .fops = &vgem_driver_fops, 241 .fops = &vgem_driver_fops,
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index d4305da88f44..ac758cdbc1bc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -29,8 +29,8 @@
29#include <drm/drm_crtc_helper.h> 29#include <drm/drm_crtc_helper.h>
30#include <drm/drm_atomic_helper.h> 30#include <drm/drm_atomic_helper.h>
31 31
32#define XRES_MIN 320 32#define XRES_MIN 32
33#define YRES_MIN 200 33#define YRES_MIN 32
34 34
35#define XRES_DEF 1024 35#define XRES_DEF 1024
36#define YRES_DEF 768 36#define YRES_DEF 768
@@ -38,138 +38,11 @@
38#define XRES_MAX 8192 38#define XRES_MAX 8192
39#define YRES_MAX 8192 39#define YRES_MAX 8192
40 40
41static void
42virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
43 struct virtio_gpu_output *output)
44{
45 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
46 output->cursor.resource_id = 0;
47 virtio_gpu_cursor_ping(vgdev, output);
48}
49
50static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
51 struct drm_file *file_priv,
52 uint32_t handle,
53 uint32_t width,
54 uint32_t height,
55 int32_t hot_x, int32_t hot_y)
56{
57 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
58 struct virtio_gpu_output *output =
59 container_of(crtc, struct virtio_gpu_output, crtc);
60 struct drm_gem_object *gobj = NULL;
61 struct virtio_gpu_object *qobj = NULL;
62 struct virtio_gpu_fence *fence = NULL;
63 int ret = 0;
64
65 if (handle == 0) {
66 virtio_gpu_hide_cursor(vgdev, output);
67 return 0;
68 }
69
70 /* lookup the cursor */
71 gobj = drm_gem_object_lookup(file_priv, handle);
72 if (gobj == NULL)
73 return -ENOENT;
74
75 qobj = gem_to_virtio_gpu_obj(gobj);
76
77 if (!qobj->hw_res_handle) {
78 ret = -EINVAL;
79 goto out;
80 }
81
82 virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
83 cpu_to_le32(64),
84 cpu_to_le32(64),
85 0, 0, &fence);
86 ret = virtio_gpu_object_reserve(qobj, false);
87 if (!ret) {
88 reservation_object_add_excl_fence(qobj->tbo.resv,
89 &fence->f);
90 fence_put(&fence->f);
91 virtio_gpu_object_unreserve(qobj);
92 virtio_gpu_object_wait(qobj, false);
93 }
94
95 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
96 output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
97 output->cursor.hot_x = cpu_to_le32(hot_x);
98 output->cursor.hot_y = cpu_to_le32(hot_y);
99 virtio_gpu_cursor_ping(vgdev, output);
100 ret = 0;
101
102out:
103 drm_gem_object_unreference_unlocked(gobj);
104 return ret;
105}
106
107static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
108 int x, int y)
109{
110 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
111 struct virtio_gpu_output *output =
112 container_of(crtc, struct virtio_gpu_output, crtc);
113
114 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
115 output->cursor.pos.x = cpu_to_le32(x);
116 output->cursor.pos.y = cpu_to_le32(y);
117 virtio_gpu_cursor_ping(vgdev, output);
118 return 0;
119}
120
121static int virtio_gpu_page_flip(struct drm_crtc *crtc,
122 struct drm_framebuffer *fb,
123 struct drm_pending_vblank_event *event,
124 uint32_t flags)
125{
126 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
127 struct virtio_gpu_output *output =
128 container_of(crtc, struct virtio_gpu_output, crtc);
129 struct drm_plane *plane = crtc->primary;
130 struct virtio_gpu_framebuffer *vgfb;
131 struct virtio_gpu_object *bo;
132 unsigned long irqflags;
133 uint32_t handle;
134
135 plane->fb = fb;
136 vgfb = to_virtio_gpu_framebuffer(plane->fb);
137 bo = gem_to_virtio_gpu_obj(vgfb->obj);
138 handle = bo->hw_res_handle;
139
140 DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle,
141 bo->dumb ? ", dumb" : "",
142 crtc->mode.hdisplay, crtc->mode.vdisplay);
143 if (bo->dumb) {
144 virtio_gpu_cmd_transfer_to_host_2d
145 (vgdev, handle, 0,
146 cpu_to_le32(crtc->mode.hdisplay),
147 cpu_to_le32(crtc->mode.vdisplay),
148 0, 0, NULL);
149 }
150 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
151 crtc->mode.hdisplay,
152 crtc->mode.vdisplay, 0, 0);
153 virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0,
154 crtc->mode.hdisplay,
155 crtc->mode.vdisplay);
156
157 if (event) {
158 spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
159 drm_send_vblank_event(crtc->dev, -1, event);
160 spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
161 }
162
163 return 0;
164}
165
166static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { 41static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
167 .cursor_set2 = virtio_gpu_crtc_cursor_set,
168 .cursor_move = virtio_gpu_crtc_cursor_move,
169 .set_config = drm_atomic_helper_set_config, 42 .set_config = drm_atomic_helper_set_config,
170 .destroy = drm_crtc_cleanup, 43 .destroy = drm_crtc_cleanup,
171 44
172 .page_flip = virtio_gpu_page_flip, 45 .page_flip = drm_atomic_helper_page_flip,
173 .reset = drm_atomic_helper_crtc_reset, 46 .reset = drm_atomic_helper_crtc_reset,
174 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 47 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
175 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 48 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -267,6 +140,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
267 spin_lock_irqsave(&crtc->dev->event_lock, flags); 140 spin_lock_irqsave(&crtc->dev->event_lock, flags);
268 if (crtc->state->event) 141 if (crtc->state->event)
269 drm_crtc_send_vblank_event(crtc, crtc->state->event); 142 drm_crtc_send_vblank_event(crtc, crtc->state->event);
143 crtc->state->event = NULL;
270 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 144 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
271} 145}
272 146
@@ -341,15 +215,6 @@ static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
341 return MODE_BAD; 215 return MODE_BAD;
342} 216}
343 217
344static struct drm_encoder*
345virtio_gpu_best_encoder(struct drm_connector *connector)
346{
347 struct virtio_gpu_output *virtio_gpu_output =
348 drm_connector_to_virtio_gpu_output(connector);
349
350 return &virtio_gpu_output->enc;
351}
352
353static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { 218static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
354 .mode_set = virtio_gpu_enc_mode_set, 219 .mode_set = virtio_gpu_enc_mode_set,
355 .enable = virtio_gpu_enc_enable, 220 .enable = virtio_gpu_enc_enable,
@@ -359,7 +224,6 @@ static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
359static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = { 224static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
360 .get_modes = virtio_gpu_conn_get_modes, 225 .get_modes = virtio_gpu_conn_get_modes,
361 .mode_valid = virtio_gpu_conn_mode_valid, 226 .mode_valid = virtio_gpu_conn_mode_valid,
362 .best_encoder = virtio_gpu_best_encoder,
363}; 227};
364 228
365static enum drm_connector_status virtio_gpu_conn_detect( 229static enum drm_connector_status virtio_gpu_conn_detect(
@@ -406,7 +270,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
406 struct drm_connector *connector = &output->conn; 270 struct drm_connector *connector = &output->conn;
407 struct drm_encoder *encoder = &output->enc; 271 struct drm_encoder *encoder = &output->enc;
408 struct drm_crtc *crtc = &output->crtc; 272 struct drm_crtc *crtc = &output->crtc;
409 struct drm_plane *plane; 273 struct drm_plane *primary, *cursor;
410 274
411 output->index = index; 275 output->index = index;
412 if (index == 0) { 276 if (index == 0) {
@@ -415,13 +279,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
415 output->info.r.height = cpu_to_le32(YRES_DEF); 279 output->info.r.height = cpu_to_le32(YRES_DEF);
416 } 280 }
417 281
418 plane = virtio_gpu_plane_init(vgdev, index); 282 primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
419 if (IS_ERR(plane)) 283 if (IS_ERR(primary))
420 return PTR_ERR(plane); 284 return PTR_ERR(primary);
421 drm_crtc_init_with_planes(dev, crtc, plane, NULL, 285 cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
286 if (IS_ERR(cursor))
287 return PTR_ERR(cursor);
288 drm_crtc_init_with_planes(dev, crtc, primary, cursor,
422 &virtio_gpu_crtc_funcs, NULL); 289 &virtio_gpu_crtc_funcs, NULL);
423 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); 290 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
424 plane->crtc = crtc; 291 primary->crtc = crtc;
292 cursor->crtc = crtc;
425 293
426 drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, 294 drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
427 DRM_MODE_CONNECTOR_VIRTUAL); 295 DRM_MODE_CONNECTOR_VIRTUAL);
@@ -466,6 +334,24 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
466 return &virtio_gpu_fb->base; 334 return &virtio_gpu_fb->base;
467} 335}
468 336
337static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
338{
339 struct drm_device *dev = state->dev;
340
341 drm_atomic_helper_commit_modeset_disables(dev, state);
342 drm_atomic_helper_commit_modeset_enables(dev, state);
343 drm_atomic_helper_commit_planes(dev, state, true);
344
345 drm_atomic_helper_commit_hw_done(state);
346
347 drm_atomic_helper_wait_for_vblanks(dev, state);
348 drm_atomic_helper_cleanup_planes(dev, state);
349}
350
351struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
352 .atomic_commit_tail = vgdev_atomic_commit_tail,
353};
354
469static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { 355static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
470 .fb_create = virtio_gpu_user_framebuffer_create, 356 .fb_create = virtio_gpu_user_framebuffer_create,
471 .atomic_check = drm_atomic_helper_check, 357 .atomic_check = drm_atomic_helper_check,
@@ -477,7 +363,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
477 int i; 363 int i;
478 364
479 drm_mode_config_init(vgdev->ddev); 365 drm_mode_config_init(vgdev->ddev);
480 vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs; 366 vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
367 vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers;
481 368
482 /* modes will be validated against the framebuffer size */ 369 /* modes will be validated against the framebuffer size */
483 vgdev->ddev->mode_config.min_width = XRES_MIN; 370 vgdev->ddev->mode_config.min_width = XRES_MIN;
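
vgdev_atomic_commit_tail() reorders the stock helper sequence: both modeset phases run before the plane commit, and the plane commit passes active_only = true, presumably because the host only accepts scanout and cursor commands for outputs that are already enabled. For contrast, the default ordering that drm_atomic_helper_commit_tail() would otherwise apply:

	/* Default tail order, shown for comparison with the override. */
	static void default_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		drm_atomic_helper_commit_modeset_disables(dev, state);
		drm_atomic_helper_commit_planes(dev, state, false);
		drm_atomic_helper_commit_modeset_enables(dev, state);

		drm_atomic_helper_commit_hw_done(state);

		drm_atomic_helper_wait_for_vblanks(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);
	}
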
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 3cc7afa77a35..5820b7020ae5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -143,7 +143,7 @@ static struct drm_driver driver = {
143 .gem_prime_vunmap = virtgpu_gem_prime_vunmap, 143 .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
144 .gem_prime_mmap = virtgpu_gem_prime_mmap, 144 .gem_prime_mmap = virtgpu_gem_prime_mmap,
145 145
146 .gem_free_object = virtio_gpu_gem_free_object, 146 .gem_free_object_unlocked = virtio_gpu_gem_free_object,
147 .gem_open_object = virtio_gpu_gem_object_open, 147 .gem_open_object = virtio_gpu_gem_object_open,
148 .gem_close_object = virtio_gpu_gem_object_close, 148 .gem_close_object = virtio_gpu_gem_object_close,
149 .fops = &virtio_gpu_driver_fops, 149 .fops = &virtio_gpu_driver_fops,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0a54f43f846a..acf556a35cb2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -33,6 +33,7 @@
33 33
34#include <drm/drmP.h> 34#include <drm/drmP.h>
35#include <drm/drm_gem.h> 35#include <drm/drm_gem.h>
36#include <drm/drm_atomic.h>
36#include <drm/drm_crtc_helper.h> 37#include <drm/drm_crtc_helper.h>
37#include <ttm/ttm_bo_api.h> 38#include <ttm/ttm_bo_api.h>
38#include <ttm/ttm_bo_driver.h> 39#include <ttm/ttm_bo_driver.h>
@@ -335,6 +336,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
335 336
336/* virtio_gpu_plane.c */ 337/* virtio_gpu_plane.c */
337struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, 338struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
339 enum drm_plane_type type,
338 int index); 340 int index);
339 341
340/* virtio_gpu_ttm.c */ 342/* virtio_gpu_ttm.c */
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 70b44a2345ab..925ca25209df 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = {
38 DRM_FORMAT_ABGR8888, 38 DRM_FORMAT_ABGR8888,
39}; 39};
40 40
41static const uint32_t virtio_gpu_cursor_formats[] = {
42 DRM_FORMAT_ARGB8888,
43};
44
41static void virtio_gpu_plane_destroy(struct drm_plane *plane) 45static void virtio_gpu_plane_destroy(struct drm_plane *plane)
42{ 46{
43 kfree(plane); 47 kfree(plane);
@@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
58 return 0; 62 return 0;
59} 63}
60 64
61static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, 65static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
62 struct drm_plane_state *old_state) 66 struct drm_plane_state *old_state)
63{ 67{
64 struct drm_device *dev = plane->dev; 68 struct drm_device *dev = plane->dev;
65 struct virtio_gpu_device *vgdev = dev->dev_private; 69 struct virtio_gpu_device *vgdev = dev->dev_private;
66 struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc); 70 struct virtio_gpu_output *output = NULL;
67 struct virtio_gpu_framebuffer *vgfb; 71 struct virtio_gpu_framebuffer *vgfb;
68 struct virtio_gpu_object *bo; 72 struct virtio_gpu_object *bo;
69 uint32_t handle; 73 uint32_t handle;
70 74
75 if (plane->state->crtc)
76 output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
77 if (old_state->crtc)
78 output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
79 WARN_ON(!output);
80
71 if (plane->state->fb) { 81 if (plane->state->fb) {
72 vgfb = to_virtio_gpu_framebuffer(plane->state->fb); 82 vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
73 bo = gem_to_virtio_gpu_obj(vgfb->obj); 83 bo = gem_to_virtio_gpu_obj(vgfb->obj);
@@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
75 if (bo->dumb) { 85 if (bo->dumb) {
76 virtio_gpu_cmd_transfer_to_host_2d 86 virtio_gpu_cmd_transfer_to_host_2d
77 (vgdev, handle, 0, 87 (vgdev, handle, 0,
78 cpu_to_le32(plane->state->crtc_w), 88 cpu_to_le32(plane->state->src_w >> 16),
79 cpu_to_le32(plane->state->crtc_h), 89 cpu_to_le32(plane->state->src_h >> 16),
80 plane->state->crtc_x, plane->state->crtc_y, NULL); 90 plane->state->src_x >> 16,
91 plane->state->src_y >> 16, NULL);
81 } 92 }
82 } else { 93 } else {
83 handle = 0; 94 handle = 0;
84 } 95 }
85 96
86 DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle, 97 DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
87 plane->state->crtc_w, plane->state->crtc_h, 98 plane->state->crtc_w, plane->state->crtc_h,
88 plane->state->crtc_x, plane->state->crtc_y); 99 plane->state->crtc_x, plane->state->crtc_y,
100 plane->state->src_w >> 16,
101 plane->state->src_h >> 16,
102 plane->state->src_x >> 16,
103 plane->state->src_y >> 16);
89 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, 104 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
90 plane->state->crtc_w, 105 plane->state->src_w >> 16,
91 plane->state->crtc_h, 106 plane->state->src_h >> 16,
92 plane->state->crtc_x, 107 plane->state->src_x >> 16,
93 plane->state->crtc_y); 108 plane->state->src_y >> 16);
94 virtio_gpu_cmd_resource_flush(vgdev, handle, 109 virtio_gpu_cmd_resource_flush(vgdev, handle,
95 plane->state->crtc_x, 110 plane->state->src_x >> 16,
96 plane->state->crtc_y, 111 plane->state->src_y >> 16,
97 plane->state->crtc_w, 112 plane->state->src_w >> 16,
98 plane->state->crtc_h); 113 plane->state->src_h >> 16);
99} 114}
100 115
116static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
117 struct drm_plane_state *old_state)
118{
119 struct drm_device *dev = plane->dev;
120 struct virtio_gpu_device *vgdev = dev->dev_private;
121 struct virtio_gpu_output *output = NULL;
122 struct virtio_gpu_framebuffer *vgfb;
123 struct virtio_gpu_fence *fence = NULL;
124 struct virtio_gpu_object *bo = NULL;
125 uint32_t handle;
126 int ret = 0;
101 127
102static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = { 128 if (plane->state->crtc)
129 output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
130 if (old_state->crtc)
131 output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
132 WARN_ON(!output);
133
134 if (plane->state->fb) {
135 vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
136 bo = gem_to_virtio_gpu_obj(vgfb->obj);
137 handle = bo->hw_res_handle;
138 } else {
139 handle = 0;
140 }
141
142 if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
143 /* new cursor -- update & wait */
144 virtio_gpu_cmd_transfer_to_host_2d
145 (vgdev, handle, 0,
146 cpu_to_le32(plane->state->crtc_w),
147 cpu_to_le32(plane->state->crtc_h),
148 0, 0, &fence);
149 ret = virtio_gpu_object_reserve(bo, false);
150 if (!ret) {
151 reservation_object_add_excl_fence(bo->tbo.resv,
152 &fence->f);
153 fence_put(&fence->f);
154 fence = NULL;
155 virtio_gpu_object_unreserve(bo);
156 virtio_gpu_object_wait(bo, false);
157 }
158 }
159
160 if (plane->state->fb != old_state->fb) {
161 DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
162 plane->state->crtc_x,
163 plane->state->crtc_y,
164 plane->state->fb ? plane->state->fb->hot_x : 0,
165 plane->state->fb ? plane->state->fb->hot_y : 0);
166 output->cursor.hdr.type =
167 cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
168 output->cursor.resource_id = cpu_to_le32(handle);
169 if (plane->state->fb) {
170 output->cursor.hot_x =
171 cpu_to_le32(plane->state->fb->hot_x);
172 output->cursor.hot_y =
173 cpu_to_le32(plane->state->fb->hot_y);
174 } else {
175 output->cursor.hot_x = cpu_to_le32(0);
176 output->cursor.hot_y = cpu_to_le32(0);
177 }
178 } else {
179 DRM_DEBUG("move +%d+%d\n",
180 plane->state->crtc_x,
181 plane->state->crtc_y);
182 output->cursor.hdr.type =
183 cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
184 }
185 output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
186 output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
187 virtio_gpu_cursor_ping(vgdev, output);
188}
189
190static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
191 .atomic_check = virtio_gpu_plane_atomic_check,
192 .atomic_update = virtio_gpu_primary_plane_update,
193};
194
195static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
103 .atomic_check = virtio_gpu_plane_atomic_check, 196 .atomic_check = virtio_gpu_plane_atomic_check,
104 .atomic_update = virtio_gpu_plane_atomic_update, 197 .atomic_update = virtio_gpu_cursor_plane_update,
105}; 198};
106 199
107struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, 200struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
201 enum drm_plane_type type,
108 int index) 202 int index)
109{ 203{
110 struct drm_device *dev = vgdev->ddev; 204 struct drm_device *dev = vgdev->ddev;
205 const struct drm_plane_helper_funcs *funcs;
111 struct drm_plane *plane; 206 struct drm_plane *plane;
112 int ret; 207 const uint32_t *formats;
208 int ret, nformats;
113 209
114 plane = kzalloc(sizeof(*plane), GFP_KERNEL); 210 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
115 if (!plane) 211 if (!plane)
116 return ERR_PTR(-ENOMEM); 212 return ERR_PTR(-ENOMEM);
117 213
214 if (type == DRM_PLANE_TYPE_CURSOR) {
215 formats = virtio_gpu_cursor_formats;
216 nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
217 funcs = &virtio_gpu_cursor_helper_funcs;
218 } else {
219 formats = virtio_gpu_formats;
220 nformats = ARRAY_SIZE(virtio_gpu_formats);
221 funcs = &virtio_gpu_primary_helper_funcs;
222 }
118 ret = drm_universal_plane_init(dev, plane, 1 << index, 223 ret = drm_universal_plane_init(dev, plane, 1 << index,
119 &virtio_gpu_plane_funcs, 224 &virtio_gpu_plane_funcs,
120 virtio_gpu_formats, 225 formats, nformats,
121 ARRAY_SIZE(virtio_gpu_formats), 226 type, NULL);
122 DRM_PLANE_TYPE_PRIMARY, NULL);
123 if (ret) 227 if (ret)
124 goto err_plane_init; 228 goto err_plane_init;
125 229
126 drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs); 230 drm_plane_helper_add(plane, funcs);
127 return plane; 231 return plane;
128 232
129err_plane_init: 233err_plane_init:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index e959df6ede83..26ac8e80a478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
46 bool goal_irq_on; /* Protected by @goal_irq_mutex */ 46 bool goal_irq_on; /* Protected by @goal_irq_mutex */
47 bool seqno_valid; /* Protected by @lock, and may not be set to true 47 bool seqno_valid; /* Protected by @lock, and may not be set to true
48 without the @goal_irq_mutex held. */ 48 without the @goal_irq_mutex held. */
49 unsigned ctx; 49 u64 ctx;
50}; 50};
51 51
52struct vmw_user_fence { 52struct vmw_user_fence {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 55231cce73a0..8a69d4da40b5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1404 return 0; 1404 return 0;
1405} 1405}
1406 1406
1407void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 1407int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1408 u16 *r, u16 *g, u16 *b, 1408 u16 *r, u16 *g, u16 *b,
1409 uint32_t start, uint32_t size) 1409 uint32_t size)
1410{ 1410{
1411 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 1411 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1412 int i; 1412 int i;
@@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1418 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); 1418 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
1419 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); 1419 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
1420 } 1420 }
1421
1422 return 0;
1421} 1423}
1422 1424
1423int vmw_du_connector_dpms(struct drm_connector *connector, int mode) 1425int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 57203212c501..ff4803c107bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -195,9 +195,9 @@ struct vmw_display_unit {
195void vmw_du_cleanup(struct vmw_display_unit *du); 195void vmw_du_cleanup(struct vmw_display_unit *du);
196void vmw_du_crtc_save(struct drm_crtc *crtc); 196void vmw_du_crtc_save(struct drm_crtc *crtc);
197void vmw_du_crtc_restore(struct drm_crtc *crtc); 197void vmw_du_crtc_restore(struct drm_crtc *crtc);
198void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 198int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
199 u16 *r, u16 *g, u16 *b, 199 u16 *r, u16 *g, u16 *b,
200 uint32_t start, uint32_t size); 200 uint32_t size);
201int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, 201int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
202 uint32_t handle, uint32_t width, uint32_t height, 202 uint32_t handle, uint32_t width, uint32_t height,
203 int32_t hot_x, int32_t hot_y); 203 int32_t hot_x, int32_t hot_y);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cbd7c986d926..2df216b39cc5 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -30,6 +30,7 @@
30 30
31#define pr_fmt(fmt) "vga_switcheroo: " fmt 31#define pr_fmt(fmt) "vga_switcheroo: " fmt
32 32
33#include <linux/apple-gmux.h>
33#include <linux/console.h> 34#include <linux/console.h>
34#include <linux/debugfs.h> 35#include <linux/debugfs.h>
35#include <linux/fb.h> 36#include <linux/fb.h>
@@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev,
308 * 309 *
309 * Register vga client (GPU). Enable vga_switcheroo if another GPU and a 310 * Register vga client (GPU). Enable vga_switcheroo if another GPU and a
310 * handler have already registered. The power state of the client is assumed 311 * handler have already registered. The power state of the client is assumed
311 * to be ON. 312 * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called
313 * to ensure that all prerequisites are met.
312 * 314 *
313 * Return: 0 on success, -ENOMEM on memory allocation error. 315 * Return: 0 on success, -ENOMEM on memory allocation error.
314 */ 316 */
@@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
329 * @id: client identifier 331 * @id: client identifier
330 * 332 *
331 * Register audio client (audio device on a GPU). The power state of the 333 * Register audio client (audio device on a GPU). The power state of the
332 * client is assumed to be ON. 334 * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer()
335 * shall be called to ensure that all prerequisites are met.
333 * 336 *
334 * Return: 0 on success, -ENOMEM on memory allocation error. 337 * Return: 0 on success, -ENOMEM on memory allocation error.
335 */ 338 */
@@ -376,6 +379,33 @@ find_active_client(struct list_head *head)
376} 379}
377 380
378/** 381/**
382 * vga_switcheroo_client_probe_defer() - whether to defer probing a given client
383 * @pdev: client pci device
384 *
385 * Determine whether any prerequisites are not fulfilled to probe a given
386 * client. Drivers shall invoke this early on in their ->probe callback
387 * and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not
388 * register the client ere thou hast called this.
389 *
390 * Return: %true if probing should be deferred, otherwise %false.
391 */
392bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
393{
394 if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
395 /*
396 * apple-gmux is needed on pre-retina MacBook Pro
397 * to probe the panel if pdev is the inactive GPU.
398 */
399 if (apple_gmux_present() && pdev != vga_default_device() &&
400 !vgasr_priv.handler_flags)
401 return true;
402 }
403
404 return false;
405}
406EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
407
408/**
379 * vga_switcheroo_get_client_state() - obtain power state of a given client 409 * vga_switcheroo_get_client_state() - obtain power state of a given client
380 * @pdev: client pci device 410 * @pdev: client pci device
381 * 411 *
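
The intended call sequence is a check at the very top of the client driver's probe, before anything is registered; a hedged usage sketch (foo_* names are placeholders, not from this patch):

	static int foo_pci_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
	{
		/* defer until apple-gmux (or whichever handler applies)
		 * has shown up; the check is meaningless once the client
		 * is registered */
		if (vga_switcheroo_client_probe_defer(pdev))
			return -EPROBE_DEFER;

		/* ... device setup, then vga_switcheroo_register_client() ... */
		return 0;
	}
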
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 6bd881be24ea..5eb1f9e17a98 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@
41 41
42#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) 42#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
43#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) 43#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
44#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
44 45
45#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) 46#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
46 47
@@ -82,6 +83,7 @@ struct its_node {
82 u64 flags; 83 u64 flags;
83 u32 ite_size; 84 u32 ite_size;
84 u32 device_ids; 85 u32 device_ids;
86 int numa_node;
85}; 87};
86 88
87#define ITS_ITT_ALIGN SZ_256 89#define ITS_ITT_ALIGN SZ_256
@@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d)
613static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 615static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
614 bool force) 616 bool force)
615{ 617{
616 unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); 618 unsigned int cpu;
619 const struct cpumask *cpu_mask = cpu_online_mask;
617 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 620 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
618 struct its_collection *target_col; 621 struct its_collection *target_col;
619 u32 id = its_get_event_id(d); 622 u32 id = its_get_event_id(d);
620 623
624 /* lpi cannot be routed to a redistributor that is on a foreign node */
625 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
626 if (its_dev->its->numa_node >= 0) {
627 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
628 if (!cpumask_intersects(mask_val, cpu_mask))
629 return -EINVAL;
630 }
631 }
632
633 cpu = cpumask_any_and(mask_val, cpu_mask);
634
621 if (cpu >= nr_cpu_ids) 635 if (cpu >= nr_cpu_ids)
622 return -EINVAL; 636 return -EINVAL;
623 637
@@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void)
1101 list_for_each_entry(its, &its_nodes, entry) { 1115 list_for_each_entry(its, &its_nodes, entry) {
1102 u64 target; 1116 u64 target;
1103 1117
1118 /* avoid cross node collections and its mapping */
1119 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1120 struct device_node *cpu_node;
1121
1122 cpu_node = of_get_cpu_node(cpu, NULL);
1123 if (its->numa_node != NUMA_NO_NODE &&
1124 its->numa_node != of_node_to_nid(cpu_node))
1125 continue;
1126 }
1127
1104 /* 1128 /*
1105 * We now have to bind each collection to its target 1129 * We now have to bind each collection to its target
1106 * redistributor. 1130 * redistributor.
@@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
1351{ 1375{
1352 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1376 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1353 u32 event = its_get_event_id(d); 1377 u32 event = its_get_event_id(d);
1378 const struct cpumask *cpu_mask = cpu_online_mask;
1379
1380 /* get the cpu_mask of local node */
1381 if (its_dev->its->numa_node >= 0)
1382 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1354 1383
1355 /* Bind the LPI to the first possible CPU */ 1384 /* Bind the LPI to the first possible CPU */
1356 its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); 1385 its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
1357 1386
1358 /* Map the GIC IRQ and event to the device */ 1387 /* Map the GIC IRQ and event to the device */
1359 its_send_mapvi(its_dev, d->hwirq, event); 1388 its_send_mapvi(its_dev, d->hwirq, event);
@@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
1443 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; 1472 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
1444} 1473}
1445 1474
1475static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
1476{
1477 struct its_node *its = data;
1478
1479 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
1480}
1481
1446static const struct gic_quirk its_quirks[] = { 1482static const struct gic_quirk its_quirks[] = {
1447#ifdef CONFIG_CAVIUM_ERRATUM_22375 1483#ifdef CONFIG_CAVIUM_ERRATUM_22375
1448 { 1484 {
@@ -1452,6 +1488,14 @@ static const struct gic_quirk its_quirks[] = {
1452 .init = its_enable_quirk_cavium_22375, 1488 .init = its_enable_quirk_cavium_22375,
1453 }, 1489 },
1454#endif 1490#endif
1491#ifdef CONFIG_CAVIUM_ERRATUM_23144
1492 {
1493 .desc = "ITS: Cavium erratum 23144",
1494 .iidr = 0xa100034c, /* ThunderX pass 1.x */
1495 .mask = 0xffff0fff,
1496 .init = its_enable_quirk_cavium_23144,
1497 },
1498#endif
1455 { 1499 {
1456 } 1500 }
1457}; 1501};
@@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node,
1514 its->base = its_base; 1558 its->base = its_base;
1515 its->phys_base = res.start; 1559 its->phys_base = res.start;
1516 its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; 1560 its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
1561 its->numa_node = of_node_to_nid(node);
1517 1562
1518 its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); 1563 its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
1519 if (!its->cmd_base) { 1564 if (!its->cmd_base) {
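
All three hunks in this file apply one rule: on parts with erratum 23144, an LPI may only target a redistributor on the ITS's own NUMA node, which its_probe() now records via of_node_to_nid(). The selection logic, condensed into a single placeholder helper:

	/* Sketch, not the literal driver code: restrict the requested
	 * affinity mask to the ITS's node before picking a target CPU. */
	static int pick_target_cpu(const struct cpumask *requested,
				   int numa_node)
	{
		const struct cpumask *allowed = cpu_online_mask;
		unsigned int cpu;

		if (numa_node >= 0) {
			allowed = cpumask_of_node(numa_node);
			if (!cpumask_intersects(requested, allowed))
				return -EINVAL;	/* no CPU on that node */
		}

		cpu = cpumask_any_and(requested, allowed);
		return cpu < nr_cpu_ids ? cpu : -EINVAL;
	}
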
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fb042ba9a3db..2c5ba0e704bf 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable)
155 155
156 while (count--) { 156 while (count--) {
157 val = readl_relaxed(rbase + GICR_WAKER); 157 val = readl_relaxed(rbase + GICR_WAKER);
158 if (enable ^ (val & GICR_WAKER_ChildrenAsleep)) 158 if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
159 break; 159 break;
160 cpu_relax(); 160 cpu_relax();
161 udelay(1); 161 udelay(1);
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index e7155db01d55..73addb4b625b 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data,
91 /* set polarity for external interrupts only */ 91 /* set polarity for external interrupts only */
92 for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { 92 for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
93 if (priv->ext_irqs[i] == data->hwirq) { 93 if (priv->ext_irqs[i] == data->hwirq) {
94 ret = pic32_set_ext_polarity(i + 1, flow_type); 94 ret = pic32_set_ext_polarity(i, flow_type);
95 if (ret) 95 if (ret)
96 return ret; 96 return ret;
97 } 97 }
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 70c28d19ea04..22cf60991df6 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -45,7 +45,7 @@
45#include <media/v4l2-ioctl.h> 45#include <media/v4l2-ioctl.h>
46 46
47#include <video/omapvrfb.h> 47#include <video/omapvrfb.h>
48#include <video/omapdss.h> 48#include <video/omapfb_dss.h>
49 49
50#include "omap_voutlib.h" 50#include "omap_voutlib.h"
51#include "omap_voutdef.h" 51#include "omap_voutdef.h"
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 9ccfe1f475a4..94b5d65afb19 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -11,7 +11,7 @@
11#ifndef OMAP_VOUTDEF_H 11#ifndef OMAP_VOUTDEF_H
12#define OMAP_VOUTDEF_H 12#define OMAP_VOUTDEF_H
13 13
14#include <video/omapdss.h> 14#include <video/omapfb_dss.h>
15#include <video/omapvrfb.h> 15#include <video/omapvrfb.h>
16 16
17#define YUYV_BPP 2 17#define YUYV_BPP 2
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
index 80b0d88f125c..58a25fdf0cce 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -26,7 +26,7 @@
26 26
27#include <linux/dma-mapping.h> 27#include <linux/dma-mapping.h>
28 28
29#include <video/omapdss.h> 29#include <video/omapfb_dss.h>
30 30
31#include "omap_voutlib.h" 31#include "omap_voutlib.h"
32 32
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c984321d1881..5d438ad3ee32 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card)
1276 * switch to HS200 mode if bus width is set successfully. 1276 * switch to HS200 mode if bus width is set successfully.
1277 */ 1277 */
1278 err = mmc_select_bus_width(card); 1278 err = mmc_select_bus_width(card);
1279 if (!IS_ERR_VALUE(err)) { 1279 if (err >= 0) {
1280 val = EXT_CSD_TIMING_HS200 | 1280 val = EXT_CSD_TIMING_HS200 |
1281 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1281 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1282 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1282 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1583 } else if (mmc_card_hs(card)) { 1583 } else if (mmc_card_hs(card)) {
1584 /* Select the desired bus width optionally */ 1584 /* Select the desired bus width optionally */
1585 err = mmc_select_bus_width(card); 1585 err = mmc_select_bus_width(card);
1586 if (!IS_ERR_VALUE(err)) { 1586 if (err >= 0) {
1587 err = mmc_select_hs_ddr(card); 1587 err = mmc_select_hs_ddr(card);
1588 if (err) 1588 if (err)
1589 goto free_card; 1589 goto free_card;
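
Both hunks rely on mmc_select_bus_width() returning the selected bus-width code on success -- a non-negative value, zero when only 1-bit is possible -- and a negative errno on failure, so success is err >= 0, not a zero test. The contract at the call site, sketched:

	err = mmc_select_bus_width(card);
	if (err >= 0) {
		/* err is the EXT_CSD_BUS_WIDTH_* value that was
		 * programmed; go on to the HS200 / HS-DDR switch */
	} else {
		/* negative errno: keep the current timing and width */
	}
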
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 7fc8b7aa83f0..2ee4c21ec55e 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
 	[SDXC_CLK_400K]		= { .output = 180, .sample = 180 },
 	[SDXC_CLK_25M]		= { .output = 180, .sample =  75 },
 	[SDXC_CLK_50M]		= { .output = 150, .sample = 120 },
-	[SDXC_CLK_50M_DDR]	= { .output =  90, .sample = 120 },
-	[SDXC_CLK_50M_DDR_8BIT]	= { .output =  90, .sample = 120 },
+	[SDXC_CLK_50M_DDR]	= { .output =  54, .sample =  36 },
+	[SDXC_CLK_50M_DDR_8BIT]	= { .output =  72, .sample =  72 },
 };
 
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
 		      MMC_CAP_1_8V_DDR |
 		      MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
 
-	/* TODO MMC DDR is not working on A80 */
-	if (of_device_is_compatible(pdev->dev.of_node,
-				    "allwinner,sun9i-a80-mmc"))
-		mmc->caps &= ~MMC_CAP_1_8V_DDR;
-
 	ret = mmc_of_parse(mmc);
 	if (ret)
 		goto error_free_dma;
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 16419f550eff..058460bdd5a6 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
 	priv->bus = bus;
 	bus->priv = priv;
 	bus->parent = priv->dev;
-	bus->name = "Synopsys MII Bus",
+	bus->name = "Synopsys MII Bus";
 	bus->read = &arc_mdio_read;
 	bus->write = &arc_mdio_write;
 	bus->reset = &arc_mdio_reset;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 8fc93c5f6abc..d02c4240b7df 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,6 +96,10 @@ struct alx_priv {
 	unsigned int rx_ringsz;
 	unsigned int rxbuf_size;
 
+	struct page  *rx_page;
+	unsigned int rx_page_offset;
+	unsigned int rx_frag_size;
+
 	struct napi_struct napi;
 	struct alx_tx_queue txq;
 	struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 9fe8b5e310d1..c98acdc0d14f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
 	}
 }
 
+static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
+{
+	struct sk_buff *skb;
+	struct page *page;
+
+	if (alx->rx_frag_size > PAGE_SIZE)
+		return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+
+	page = alx->rx_page;
+	if (!page) {
+		alx->rx_page = page = alloc_page(gfp);
+		if (unlikely(!page))
+			return NULL;
+		alx->rx_page_offset = 0;
+	}
+
+	skb = build_skb(page_address(page) + alx->rx_page_offset,
+			alx->rx_frag_size);
+	if (likely(skb)) {
+		alx->rx_page_offset += alx->rx_frag_size;
+		if (alx->rx_page_offset >= PAGE_SIZE)
+			alx->rx_page = NULL;
+		else
+			get_page(page);
+	}
+	return skb;
+}
+
+
 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 {
 	struct alx_rx_queue *rxq = &alx->rxq;
@@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 	while (!cur_buf->skb && next != rxq->read_idx) {
 		struct alx_rfd *rfd = &rxq->rfd[cur];
 
-		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+		skb = alx_alloc_skb(alx, gfp);
 		if (!skb)
 			break;
 		dma = dma_map_single(&alx->hw.pdev->dev,
@@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
 	}
 
+
 	return count;
 }
 
@@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx)
 	kfree(alx->txq.bufs);
 	kfree(alx->rxq.bufs);
 
+	if (alx->rx_page) {
+		put_page(alx->rx_page);
+		alx->rx_page = NULL;
+	}
+
 	dma_free_coherent(&alx->hw.pdev->dev,
 			  alx->descmem.size,
 			  alx->descmem.virt,
@@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx)
 				  alx->dev->name, alx);
 		if (!err)
 			goto out;
+
 		/* fall back to legacy interrupt */
 		pci_disable_msi(alx->hw.pdev);
 	}
@@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx)
 	struct pci_dev *pdev = alx->hw.pdev;
 	struct alx_hw *hw = &alx->hw;
 	int err;
+	unsigned int head_size;
 
 	err = alx_identify_hw(alx);
 	if (err) {
@@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx)
 
 	hw->smb_timer = 400;
 	hw->mtu = alx->dev->mtu;
+
 	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
+	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alx->rx_frag_size = roundup_pow_of_two(head_size);
+
 	alx->tx_ringsz = 256;
 	alx->rx_ringsz = 512;
 	hw->imt = 200;
@@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 {
 	struct alx_priv *alx = netdev_priv(netdev);
 	int max_frame = ALX_MAX_FRAME_LEN(mtu);
+	unsigned int head_size;
 
 	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
 	    (max_frame > ALX_MAX_FRAME_SIZE))
@@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 	netdev->mtu = mtu;
 	alx->hw.mtu = mtu;
 	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
+	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alx->rx_frag_size = roundup_pow_of_two(head_size);
 	netdev_update_features(netdev);
 	if (netif_running(netdev))
 		alx_reinit(alx);
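
Note on the alx hunks above: they replace per-packet __netdev_alloc_skb() with build_skb() over slices of a shared page, handing out rx_frag_size-sized fragments and taking one page reference per outstanding slice. A hedged sketch of that page-slicing pattern follows; frag_pool and frag_alloc() are illustrative names, not driver API:

	/* Minimal sketch of the page-fragment allocator idea, assuming
	 * frag_size is a power of two no larger than PAGE_SIZE.
	 */
	struct frag_pool {
		struct page *page;	/* current backing page, NULL when consumed */
		unsigned int offset;	/* next free slice offset */
		unsigned int frag_size;
	};

	static void *frag_alloc(struct frag_pool *p, gfp_t gfp)
	{
		void *buf;

		if (!p->page) {
			p->page = alloc_page(gfp);
			if (!p->page)
				return NULL;
			p->offset = 0;
		}
		buf = page_address(p->page) + p->offset;
		p->offset += p->frag_size;
		if (p->offset >= PAGE_SIZE)
			p->page = NULL;		/* last slice keeps the initial ref */
		else
			get_page(p->page);	/* extra ref for the slice handed out */
		return buf;
	}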
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0a5b770cefaa..c5fe915870ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13941,14 +13941,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 		bp->doorbells = bnx2x_vf_doorbells(bp);
 		rc = bnx2x_vf_pci_alloc(bp);
 		if (rc)
-			goto init_one_exit;
+			goto init_one_freemem;
 	} else {
 		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
 		if (doorbell_size > pci_resource_len(pdev, 2)) {
 			dev_err(&bp->pdev->dev,
 				"Cannot map doorbells, bar size too small, aborting\n");
 			rc = -ENOMEM;
-			goto init_one_exit;
+			goto init_one_freemem;
 		}
 		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
 						doorbell_size);
@@ -13957,19 +13957,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 		dev_err(&bp->pdev->dev,
 			"Cannot map doorbell space, aborting\n");
 		rc = -ENOMEM;
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 
 	if (IS_VF(bp)) {
 		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
 		if (rc)
-			goto init_one_exit;
+			goto init_one_freemem;
 	}
 
 	/* Enable SRIOV if capability found in configuration space */
 	rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
 	if (rc)
-		goto init_one_exit;
+		goto init_one_freemem;
 
 	/* calc qm_cid_count */
 	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13988,7 +13988,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	rc = bnx2x_set_int_mode(bp);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot set interrupts\n");
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 	BNX2X_DEV_INFO("set interrupts successfully\n");
 
@@ -13996,7 +13996,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	rc = register_netdev(dev);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register net device\n");
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 	BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
 
@@ -14029,6 +14029,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 
 	return 0;
 
+init_one_freemem:
+	bnx2x_free_mem_bp(bp);
+
 init_one_exit:
 	bnx2x_disable_pcie_error_reporting(bp);
 
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 085f9125cf42..06f031715b57 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 	 * re-adding ourselves to the poll list.
 	 */
 
-	if (priv->tx_skb && !tx_ctrl_ct)
+	if (priv->tx_skb && !tx_ctrl_ct) {
+		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
 		napi_reschedule(napi);
+	}
 	}
 
 	return work_done;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ca2cccc594fd..3c0255e98535 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 				 fec16_to_cpu(bdp->cbd_datlen),
 				 DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = cpu_to_fec32(0);
-		if (!skb) {
-			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-			continue;
-		}
+		if (!skb)
+			goto skb_done;
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
-
+skb_done:
 		/* Make sure the update to bdp and tx_skbuff are performed
 		 * before dirty_tx
 		 */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3d746c887873..67a648c7d3a9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
 	u32 link_stat = priv->link;
 	struct hnae_handle *h;
 
-	assert(priv && priv->ae_handle);
 	h = priv->ae_handle;
 
 	if (priv->phy) {
@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
 {
 	struct hns_nic_priv *priv = netdev_priv(net_dev);
 
-	assert(priv);
-
 	strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
 		sizeof(drvinfo->version));
 	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
 	struct hnae_handle *h;
 	struct hnae_ae_ops *ops;
 
-	assert(priv || priv->ae_handle);
-
 	h = priv->ae_handle;
 	ops = h->dev->ops;
 
@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
 	struct hnae_ae_ops *ops;
 	int ret;
 
-	assert(priv || priv->ae_handle);
-
 	ops = priv->ae_handle->dev->ops;
 
 	if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
 	struct hns_nic_priv *priv = netdev_priv(net_dev);
 	struct hnae_ae_ops *ops;
 
-	assert(priv || priv->ae_handle);
-
 	ops = priv->ae_handle->dev->ops;
 
 	cmd->version = HNS_CHIP_VERSION;
@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
 	struct hns_nic_priv *priv = netdev_priv(net_dev);
 	struct hnae_ae_ops *ops;
 
-	assert(priv || priv->ae_handle);
-
 	ops = priv->ae_handle->dev->ops;
 	if (!ops->get_regs_len) {
 		netdev_err(net_dev, "ops->get_regs_len is null!\n");
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 01fccec632ec..466939f8f0cf 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
189 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 189 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
190 hwbm_pool->construct = mvneta_bm_construct; 190 hwbm_pool->construct = mvneta_bm_construct;
191 hwbm_pool->priv = new_pool; 191 hwbm_pool->priv = new_pool;
192 spin_lock_init(&hwbm_pool->lock);
192 193
193 /* Create new pool */ 194 /* Create new pool */
194 err = mvneta_bm_pool_create(priv, new_pool); 195 err = mvneta_bm_pool_create(priv, new_pool);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c761194bb323..fc95affaf76b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
362 362
363 for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) 363 for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
364 if (bitmap_iterator_test(&it)) 364 if (bitmap_iterator_test(&it))
365 data[index++] = ((unsigned long *)&priv->stats)[i]; 365 data[index++] = ((unsigned long *)&dev->stats)[i];
366 366
367 for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) 367 for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
368 if (bitmap_iterator_test(&it)) 368 if (bitmap_iterator_test(&it))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 92e0624f4cf0..19ceced6736c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 }
 
 
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
 	spin_lock_bh(&priv->stats_lock);
-	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+	netdev_stats_to_stats64(stats, &dev->stats);
 	spin_unlock_bh(&priv->stats_lock);
 
-	return &priv->ret_stats;
+	return stats;
 }
 
 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
 		en_dbg(HW, priv, "Failed dumping statistics\n");
 
-	memset(&priv->stats, 0, sizeof(priv->stats));
 	memset(&priv->pstats, 0, sizeof(priv->pstats));
 	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
 	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
@@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 		priv->tx_ring[i]->bytes = 0;
 		priv->tx_ring[i]->packets = 0;
 		priv->tx_ring[i]->tx_csum = 0;
+		priv->tx_ring[i]->tx_dropped = 0;
+		priv->tx_ring[i]->queue_stopped = 0;
+		priv->tx_ring[i]->wake_queue = 0;
+		priv->tx_ring[i]->tso_packets = 0;
+		priv->tx_ring[i]->xmit_more = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		priv->rx_ring[i]->bytes = 0;
@@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_stop		= mlx4_en_close,
 	.ndo_start_xmit		= mlx4_en_xmit,
 	.ndo_select_queue	= mlx4_en_select_queue,
-	.ndo_get_stats		= mlx4_en_get_stats,
+	.ndo_get_stats64	= mlx4_en_get_stats64,
 	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
 	.ndo_set_mac_address	= mlx4_en_set_mac,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 	.ndo_stop		= mlx4_en_close,
 	.ndo_start_xmit		= mlx4_en_xmit,
 	.ndo_select_queue	= mlx4_en_select_queue,
-	.ndo_get_stats		= mlx4_en_get_stats,
+	.ndo_get_stats64	= mlx4_en_get_stats64,
 	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
 	.ndo_set_mac_address	= mlx4_en_set_mac,
 	.ndo_validate_addr	= eth_validate_addr,
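
Note on the mlx4_en hunks above: the driver swaps the legacy .ndo_get_stats hook for .ndo_get_stats64, filling the caller-supplied 64-bit buffer from dev->stats instead of returning a pointer to a private net_device_stats copy. A hedged sketch of that handler shape, as the 4.7-era signature requires; my_priv and my_lock are illustrative names, not mlx4 fields:

	static struct rtnl_link_stats64 *
	my_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
	{
		struct my_priv *priv = netdev_priv(dev);

		spin_lock_bh(&priv->my_lock);	/* counters updated in BH context */
		netdev_stats_to_stats64(stats, &dev->stats);
		spin_unlock_bh(&priv->my_lock);

		return stats;	/* caller owns the buffer; nothing cached in priv */
	}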
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 20b6c2e678b8..5aa8b751f417 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	struct mlx4_counter tmp_counter_stats;
 	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
 	struct mlx4_en_stat_out_flow_control_mbox *flowstats;
-	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
-	struct net_device_stats *stats = &priv->stats;
+	struct net_device *dev = mdev->pndev[port];
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
 	struct mlx4_cmd_mailbox *mailbox;
 	u64 in_mod = reset << 8 | port;
 	int err;
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	}
 	stats->tx_packets = 0;
 	stats->tx_bytes = 0;
+	stats->tx_dropped = 0;
 	priv->port_stats.tx_chksum_offload = 0;
 	priv->port_stats.queue_stopped = 0;
 	priv->port_stats.wake_queue = 0;
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 
 		stats->tx_packets += ring->packets;
 		stats->tx_bytes += ring->bytes;
+		stats->tx_dropped += ring->tx_dropped;
 		priv->port_stats.tx_chksum_offload += ring->tx_csum;
 		priv->port_stats.queue_stopped += ring->queue_stopped;
 		priv->port_stats.wake_queue += ring->wake_queue;
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
 					  &mlx4_en_stats->MCAST_prio_1,
 					  NUM_PRIORITIES);
-	stats->collisions = 0;
 	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
 			    sw_rx_dropped;
 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
-	stats->rx_over_errors = 0;
 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
-	stats->rx_frame_errors = 0;
 	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-	stats->rx_missed_errors = 0;
-	stats->tx_aborted_errors = 0;
-	stats->tx_carrier_errors = 0;
-	stats->tx_fifo_errors = 0;
-	stats->tx_heartbeat_errors = 0;
-	stats->tx_window_errors = 0;
-	stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+	stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
 
 	/* RX stats */
 	priv->pkstats.rx_multicast_packets = stats->multicast;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f6e61570cb2c..76aa4d27183c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	bool inline_ok;
 	u32 ring_cons;
 
-	if (!priv->port_up)
-		goto tx_drop;
-
 	tx_ind = skb_get_queue_mapping(skb);
 	ring = priv->tx_ring[tx_ind];
 
+	if (!priv->port_up)
+		goto tx_drop;
+
 	/* fetch ring->cons far ahead before needing it to avoid stall */
 	ring_cons = ACCESS_ONCE(ring->cons);
 
@@ -1030,7 +1030,7 @@ tx_drop_unmap:
 
 tx_drop:
 	dev_kfree_skb_any(skb);
-	priv->stats.tx_dropped++;
+	ring->tx_dropped++;
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index cc84e09f324a..467d47ed2c39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring {
 	unsigned long tx_csum;
 	unsigned long tso_packets;
 	unsigned long xmit_more;
+	unsigned int tx_dropped;
 	struct mlx4_bf bf;
 	unsigned long queue_stopped;
 
@@ -482,8 +483,6 @@ struct mlx4_en_priv {
 	struct mlx4_en_port_profile *prof;
 	struct net_device *dev;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	struct net_device_stats stats;
-	struct net_device_stats ret_stats;
 	struct mlx4_en_port_state port_state;
 	spinlock_t stats_lock;
 	struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index b3cc3ab63799..6fc156a3918d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
 		goto free_uar;
 	}
 
-	uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+	uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
+					uar->index << PAGE_SHIFT,
+					PAGE_SIZE);
 	if (!uar->bf_map) {
 		err = -ENOMEM;
 		goto unamp_uar;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index cbf58e1f9333..21ec1c2df2c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 			 struct dcbx_app_priority_entry *p_tbl,
 			 u32 pri_tc_tbl, int count, bool dcbx_enabled)
 {
-	u8 tc, priority, priority_map;
+	u8 tc, priority_map;
 	enum dcbx_protocol_type type;
 	u16 protocol_id;
+	int priority;
 	bool enable;
 	int i;
 
@@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 			 * indication, but we only got here if there was an
 			 * app tlv for the protocol, so dcbx must be enabled.
 			 */
-			enable = !!(type == DCBX_PROTOCOL_ETH);
+			enable = !(type == DCBX_PROTOCOL_ETH);
 
 			qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
 						 priority, tc, type);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 089016f46f26..2d89e8c16b32 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev)
 	}
 }
 
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 {
 	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 	struct init_qm_port_params *p_qm_port;
 	u16 num_pqs, multi_cos_tcs = 1;
+	u8 pf_wfq = qm_info->pf_wfq;
+	u32 pf_rl = qm_info->pf_rl;
 	u16 num_vfs = 0;
 
 #ifdef CONFIG_QED_SRIOV
@@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 
 	/* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
 	 */
-	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
-					num_pqs, GFP_KERNEL);
+	qm_info->qm_pq_params = kcalloc(num_pqs,
+					sizeof(struct init_qm_pq_params),
+					b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
 	if (!qm_info->qm_pq_params)
 		goto alloc_err;
 
-	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
-					   num_vports, GFP_KERNEL);
+	qm_info->qm_vport_params = kcalloc(num_vports,
+					   sizeof(struct init_qm_vport_params),
+					   b_sleepable ? GFP_KERNEL
+						       : GFP_ATOMIC);
 	if (!qm_info->qm_vport_params)
 		goto alloc_err;
 
-	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
-					  MAX_NUM_PORTS, GFP_KERNEL);
+	qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+					  sizeof(struct init_qm_port_params),
+					  b_sleepable ? GFP_KERNEL
+						      : GFP_ATOMIC);
 	if (!qm_info->qm_port_params)
 		goto alloc_err;
 
-	qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
-				    GFP_KERNEL);
+	qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+				    b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
 	if (!qm_info->wfq_data)
 		goto alloc_err;
 
@@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 	for (i = 0; i < qm_info->num_vports; i++)
 		qm_info->qm_vport_params[i].vport_wfq = 1;
 
-	qm_info->pf_wfq = 0;
-	qm_info->pf_rl = 0;
 	qm_info->vport_rl_en = 1;
 	qm_info->vport_wfq_en = 1;
+	qm_info->pf_rl = pf_rl;
+	qm_info->pf_wfq = pf_wfq;
 
 	return 0;
 
@@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	qed_qm_info_free(p_hwfn);
 
 	/* initialize qed's qm data structure */
-	rc = qed_init_qm_info(p_hwfn);
+	rc = qed_init_qm_info(p_hwfn, false);
 	if (rc)
 		return rc;
 
@@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
 			goto alloc_err;
 
 		/* Prepare and process QM requirements */
-		rc = qed_init_qm_info(p_hwfn);
+		rc = qed_init_qm_info(p_hwfn, true);
 		if (rc)
 			goto alloc_err;
 
@@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 
 	hw_mode |= 1 << MODE_ASIC;
 
+	if (p_hwfn->cdev->num_hwfns > 1)
+		hw_mode |= 1 << MODE_100G;
+
 	p_hwfn->hw_info.hw_mode = hw_mode;
+
+	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+		   "Configuring function for hw_mode: 0x%08x\n",
+		   p_hwfn->hw_info.hw_mode);
 }
 
 /* Init run time data for all PFs on an engine. */
@@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev,
 	u32 load_code, param;
 	int rc, mfw_rc, i;
 
+	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+		return -EINVAL;
+	}
+
 	if (IS_PF(cdev)) {
 		rc = qed_init_fw_data(cdev, bin_fw_data);
 		if (rc != 0)
@@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
 {
 	int i;
 
+	if (cdev->num_hwfns > 1) {
+		DP_VERBOSE(cdev,
+			   NETIF_MSG_LINK,
+			   "WFQ configuration is not supported for this device\n");
+		return;
+	}
+
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
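
Note on the qed_init_qm_info() hunks above: a b_sleepable flag is threaded down to the allocators so reconfiguration paths that run in atomic context use GFP_ATOMIC while probe-time callers keep GFP_KERNEL. A hedged one-helper sketch of the flag-to-gfp mapping; ctx_alloc is an illustrative helper, not qed API:

	static void *ctx_alloc(size_t n, size_t size, bool can_sleep)
	{
		/* sleepable callers may block for memory; atomic ones must not */
		return kcalloc(n, size, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	}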
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 8b22f87033ce..753064679bde 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
 		/* Fallthrough */
 
 	case QED_INT_MODE_MSI:
-		rc = pci_enable_msi(cdev->pdev);
-		if (!rc) {
-			int_params->out.int_mode = QED_INT_MODE_MSI;
-			goto out;
-		}
+		if (cdev->num_hwfns == 1) {
+			rc = pci_enable_msi(cdev->pdev);
+			if (!rc) {
+				int_params->out.int_mode = QED_INT_MODE_MSI;
+				goto out;
+			}
 
-		DP_NOTICE(cdev, "Failed to enable MSI\n");
-		if (force_mode)
-			goto out;
+			DP_NOTICE(cdev, "Failed to enable MSI\n");
+			if (force_mode)
+				goto out;
+		}
 		/* Fallthrough */
 
 	case QED_INT_MODE_INTA:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 1bc75358cbc4..ad3cae3b7243 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
 	case ETH_SS_PRIV_FLAGS:
 		return QEDE_PRI_FLAG_LEN;
 	case ETH_SS_TEST:
-		return QEDE_ETHTOOL_TEST_MAX;
+		if (!IS_VF(edev))
+			return QEDE_ETHTOOL_TEST_MAX;
+		else
+			return 0;
 	default:
 		DP_VERBOSE(edev, QED_MSG_DEBUG,
 			   "Unsupported stringset 0x%08x\n", stringset);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 337e839ca586..5d00d1404bfc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1824,7 +1824,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx,
 {
 	struct qede_dev *edev = netdev_priv(dev);
 
-	return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
 					max_tx_rate);
 }
 
@@ -2091,6 +2091,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
 	edev->accept_any_vlan = false;
 }
 
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	netdev_features_t changes = features ^ dev->features;
+	bool need_reload = false;
+
+	/* No action needed if hardware GRO is disabled during driver load */
+	if (changes & NETIF_F_GRO) {
+		if (dev->features & NETIF_F_GRO)
+			need_reload = !edev->gro_disable;
+		else
+			need_reload = edev->gro_disable;
+	}
+
+	if (need_reload && netif_running(edev->ndev)) {
+		dev->features = features;
+		qede_reload(edev, NULL, NULL);
+		return 1;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_QEDE_VXLAN
 static void qede_add_vxlan_port(struct net_device *dev,
 				sa_family_t sa_family, __be16 port)
@@ -2175,6 +2198,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
 	.ndo_set_vf_link_state = qede_set_vf_link_state,
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 83d72106471c..fd5d1c93b55b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev)
 	}
 
 	/* Disabling the timer */
-	del_timer_sync(&qdev->timer);
 	ql_cancel_all_work_sync(qdev);
 
 	for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
 		netif_device_detach(ndev);
+		del_timer_sync(&qdev->timer);
 		if (netif_running(ndev))
 			ql_eeh_close(ndev);
 		pci_disable_device(pdev);
@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
 	case pci_channel_io_perm_failure:
 		dev_err(&pdev->dev,
 			"%s: pci_channel_io_perm_failure.\n", __func__);
+		del_timer_sync(&qdev->timer);
 		ql_eeh_close(ndev);
 		set_bit(QL_EEH_FATAL, &qdev->flags);
 		return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1681084cc96f..1f309127457d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -619,6 +619,17 @@ fail:
 	return rc;
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+
+	/* All our existing PIO buffers went away */
+	efx_for_each_channel(channel, efx)
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			tx_queue->piobuf = NULL;
+}
+
 #else /* !EFX_USE_PIO */
 
 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
 {
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
 #endif /* EFX_USE_PIO */
 
 static void efx_ef10_remove(struct efx_nic *efx)
639 654
640static void efx_ef10_remove(struct efx_nic *efx) 655static void efx_ef10_remove(struct efx_nic *efx)
@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
 	nic_data->must_realloc_vis = true;
 	nic_data->must_restore_filters = true;
 	nic_data->must_restore_piobufs = true;
+	efx_ef10_forget_old_piobufs(efx);
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
 	/* Driver-created vswitches and vports must be re-created */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0705ec869487..097f363f1630 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
 
 #ifdef CONFIG_RFS_ACCEL
 	if (efx->type->offload_features & NETIF_F_NTUPLE) {
-		efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
-					   sizeof(*efx->rps_flow_id),
-					   GFP_KERNEL);
-		if (!efx->rps_flow_id) {
+		struct efx_channel *channel;
+		int i, success = 1;
+
+		efx_for_each_channel(channel, efx) {
+			channel->rps_flow_id =
+				kcalloc(efx->type->max_rx_ip_filters,
+					sizeof(*channel->rps_flow_id),
+					GFP_KERNEL);
+			if (!channel->rps_flow_id)
+				success = 0;
+			else
+				for (i = 0;
+				     i < efx->type->max_rx_ip_filters;
+				     ++i)
+					channel->rps_flow_id[i] =
+						RPS_FLOW_ID_INVALID;
+		}
+
+		if (!success) {
+			efx_for_each_channel(channel, efx)
+				kfree(channel->rps_flow_id);
 			efx->type->filter_table_remove(efx);
 			rc = -ENOMEM;
 			goto out_unlock;
 		}
+
+		efx->rps_expire_index = efx->rps_expire_channel = 0;
 	}
 #endif
 out_unlock:
@@ -1744,7 +1763,10 @@ out_unlock:
 static void efx_remove_filters(struct efx_nic *efx)
 {
 #ifdef CONFIG_RFS_ACCEL
-	kfree(efx->rps_flow_id);
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		kfree(channel->rps_flow_id);
 #endif
 	down_write(&efx->filter_sem);
 	efx->type->filter_table_remove(efx);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 38c422321cda..d13ddf9703ff 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
  * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ *	indexed by filter ID
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
  * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
 	unsigned int irq_mod_score;
 #ifdef CONFIG_RFS_ACCEL
 	unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+	u32 *rps_flow_id;
 #endif
 
 	unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@ struct vfdi_status;
  * @filter_sem: Filter table rw_semaphore, for freeing the table
  * @filter_lock: Filter table lock, for mere content changes
  * @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- *	indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ *	@rps_expire_channel's @rps_flow_id
  * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
  * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
  *	Decremented when the efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@ struct efx_nic {
 	spinlock_t filter_lock;
 	void *filter_state;
 #ifdef CONFIG_RFS_ACCEL
-	u32 *rps_flow_id;
+	unsigned int rps_expire_channel;
 	unsigned int rps_expire_index;
 #endif
 
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8956995b2fe7..02b0b5272c14 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_channel *channel;
 	struct efx_filter_spec spec;
-	const __be16 *ports;
-	__be16 ether_type;
-	int nhoff;
+	struct flow_keys fk;
 	int rc;
 
-	/* The core RPS/RFS code has already parsed and validated
-	 * VLAN, IP and transport headers.  We assume they are in the
-	 * header area.
-	 */
-
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		const struct vlan_hdr *vh =
-			(const struct vlan_hdr *)skb->data;
+	if (flow_id == RPS_FLOW_ID_INVALID)
+		return -EINVAL;
 
-		/* We can't filter on the IP 5-tuple and the vlan
-		 * together, so just strip the vlan header and filter
-		 * on the IP part.
-		 */
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
-		ether_type = vh->h_vlan_encapsulated_proto;
-		nhoff = sizeof(struct vlan_hdr);
-	} else {
-		ether_type = skb->protocol;
-		nhoff = 0;
-	}
+	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+		return -EPROTONOSUPPORT;
 
-	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+		return -EPROTONOSUPPORT;
+	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
 		return -EPROTONOSUPPORT;
 
 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 			   EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
 			   EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
 			   EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
-	spec.ether_type = ether_type;
-
-	if (ether_type == htons(ETH_P_IP)) {
-		const struct iphdr *ip =
-			(const struct iphdr *)(skb->data + nhoff);
-
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
-		if (ip_is_fragment(ip))
-			return -EPROTONOSUPPORT;
-		spec.ip_proto = ip->protocol;
-		spec.rem_host[0] = ip->saddr;
-		spec.loc_host[0] = ip->daddr;
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
-		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+	spec.ether_type = fk.basic.n_proto;
+	spec.ip_proto = fk.basic.ip_proto;
+
+	if (fk.basic.n_proto == htons(ETH_P_IP)) {
+		spec.rem_host[0] = fk.addrs.v4addrs.src;
+		spec.loc_host[0] = fk.addrs.v4addrs.dst;
 	} else {
-		const struct ipv6hdr *ip6 =
-			(const struct ipv6hdr *)(skb->data + nhoff);
-
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
-				    nhoff + sizeof(*ip6) + 4);
-		spec.ip_proto = ip6->nexthdr;
-		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
-		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
-		ports = (const __be16 *)(ip6 + 1);
+		memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
 	}
 
-	spec.rem_port = ports[0];
-	spec.loc_port = ports[1];
+	spec.rem_port = fk.ports.src;
+	spec.loc_port = fk.ports.dst;
 
 	rc = efx->type->filter_rfs_insert(efx, &spec);
 	if (rc < 0)
 		return rc;
 
 	/* Remember this so we can check whether to expire the filter later */
-	efx->rps_flow_id[rc] = flow_id;
-	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+	channel = efx_get_channel(efx, rxq_index);
+	channel->rps_flow_id[rc] = flow_id;
 	++channel->rfs_filters_added;
 
-	if (ether_type == htons(ETH_P_IP))
+	if (spec.ether_type == htons(ETH_P_IP))
 		netif_info(efx, rx_status, efx->net_dev,
 			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
-			   ntohs(ports[1]), rxq_index, flow_id, rc);
+			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
 	else
 		netif_info(efx, rx_status, efx->net_dev,
 			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
-			   ntohs(ports[1]), rxq_index, flow_id, rc);
+			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
 
 	return rc;
 }
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
 {
 	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
-	unsigned int index, size;
+	unsigned int channel_idx, index, size;
 	u32 flow_id;
 
 	if (!spin_trylock_bh(&efx->filter_lock))
 		return false;
 
 	expire_one = efx->type->filter_rfs_expire_one;
+	channel_idx = efx->rps_expire_channel;
 	index = efx->rps_expire_index;
 	size = efx->type->max_rx_ip_filters;
 	while (quota--) {
-		flow_id = efx->rps_flow_id[index];
-		if (expire_one(efx, flow_id, index))
+		struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+		flow_id = channel->rps_flow_id[index];
+
+		if (flow_id != RPS_FLOW_ID_INVALID &&
+		    expire_one(efx, flow_id, index)) {
 			netif_info(efx, rx_status, efx->net_dev,
-				   "expired filter %d [flow %u]\n",
-				   index, flow_id);
-		if (++index == size)
+				   "expired filter %d [queue %u flow %u]\n",
+				   index, channel_idx, flow_id);
+			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+		}
+		if (++index == size) {
+			if (++channel_idx == efx->n_channels)
+				channel_idx = 0;
 			index = 0;
+		}
 	}
+	efx->rps_expire_channel = channel_idx;
 	efx->rps_expire_index = index;
 
 	spin_unlock_bh(&efx->filter_lock);
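
Note on the sfc hunks above: the RFS flow-id table moves into each channel, and expiry walks round-robin across channels, wrapping the per-channel index before advancing the channel cursor. A hedged sketch of that two-level cursor; the function and parameter names are illustrative, not sfc API:

	static void rfs_cursor_advance(unsigned int *index, unsigned int *channel,
				       unsigned int table_size,
				       unsigned int n_channels)
	{
		if (++*index == table_size) {	/* finished this channel's table */
			if (++*channel == n_channels)
				*channel = 0;	/* wrap to the first channel */
			*index = 0;
		}
	}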
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 3f83c369f56c..ec295851812b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev)
 		return -ENOMEM;
 
 	if (mdio_bus_data->irqs)
-		memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq));
+		memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
 
 #ifdef CONFIG_OF
 	if (priv->device->of_node)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a0f64cba86ba..2ace126533cd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
 #define TEAM_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
 				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
 
-static void __team_compute_features(struct team *team)
+static void ___team_compute_features(struct team *team)
 {
 	struct team_port *port;
 	u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team)
1021 team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1021 team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1022 if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) 1022 if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; 1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1024}
1024 1025
1026static void __team_compute_features(struct team *team)
1027{
1028 ___team_compute_features(team);
1025 netdev_change_features(team->dev); 1029 netdev_change_features(team->dev);
1026} 1030}
1027 1031
1028static void team_compute_features(struct team *team) 1032static void team_compute_features(struct team *team)
1029{ 1033{
1030 mutex_lock(&team->lock); 1034 mutex_lock(&team->lock);
1031 __team_compute_features(team); 1035 ___team_compute_features(team);
1032 mutex_unlock(&team->lock); 1036 mutex_unlock(&team->lock);
1037 netdev_change_features(team->dev);
1033} 1038}
1034 1039
1035static int team_port_enter(struct team *team, struct team_port *port) 1040static int team_port_enter(struct team *team, struct team_port *port)
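
The point of the team.c split is that team_compute_features() now calls netdev_change_features() only after dropping team->lock; the new triple-underscore helper does just the locked computation. Running the notifier outside the mutex avoids lock-ordering trouble when the feature-change callbacks re-enter the driver. A generic shape of that pattern, with hypothetical helper names:

static void team_refresh_locked(struct team *team)
{
        /* pure recomputation; caller holds team->lock */
}

static void team_refresh(struct team *team)
{
        mutex_lock(&team->lock);
        team_refresh_locked(team);
        mutex_unlock(&team->lock);
        netdev_change_features(team->dev);   /* may re-enter; run unlocked */
}
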
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 36cd7f016a8d..9bbe0161a2f4 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb)
473 goto goon; 473 goto goon;
474 } 474 }
475 475
476 if (!count || count < 4) 476 if (count < 4)
477 goto goon; 477 goto goon;
478 478
479 rx_status = buf[count - 2]; 479 rx_status = buf[count - 2];
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d9d2806a47b1..dc989a8b5afb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -61,6 +61,8 @@
61#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ 61#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
62 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) 62 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
63 63
64#define CARRIER_CHECK_DELAY (2 * HZ)
65
64struct smsc95xx_priv { 66struct smsc95xx_priv {
65 u32 mac_cr; 67 u32 mac_cr;
66 u32 hash_hi; 68 u32 hash_hi;
@@ -69,6 +71,9 @@ struct smsc95xx_priv {
69 spinlock_t mac_cr_lock; 71 spinlock_t mac_cr_lock;
70 u8 features; 72 u8 features;
71 u8 suspend_flags; 73 u8 suspend_flags;
74 bool link_ok;
75 struct delayed_work carrier_check;
76 struct usbnet *dev;
72}; 77};
73 78
74static bool turbo_mode = true; 79static bool turbo_mode = true;
@@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
624 intdata); 629 intdata);
625} 630}
626 631
632static void set_carrier(struct usbnet *dev, bool link)
633{
634 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
635
636 if (pdata->link_ok == link)
637 return;
638
639 pdata->link_ok = link;
640
641 if (link)
642 usbnet_link_change(dev, 1, 0);
643 else
644 usbnet_link_change(dev, 0, 0);
645}
646
647static void check_carrier(struct work_struct *work)
648{
649 struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
650 carrier_check.work);
651 struct usbnet *dev = pdata->dev;
652 int ret;
653
654 if (pdata->suspend_flags != 0)
655 return;
656
657 ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
658 if (ret < 0) {
659 netdev_warn(dev->net, "Failed to read MII_BMSR\n");
660 return;
661 }
662 if (ret & BMSR_LSTATUS)
663 set_carrier(dev, 1);
664 else
665 set_carrier(dev, 0);
666
667 schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
668}
669
627/* Enable or disable Tx & Rx checksum offload engines */ 670/* Enable or disable Tx & Rx checksum offload engines */
628static int smsc95xx_set_features(struct net_device *netdev, 671static int smsc95xx_set_features(struct net_device *netdev,
629 netdev_features_t features) 672 netdev_features_t features)
@@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1165 dev->net->flags |= IFF_MULTICAST; 1208 dev->net->flags |= IFF_MULTICAST;
1166 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; 1209 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
1167 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 1210 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1211
1212 pdata->dev = dev;
1213 INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
1214 schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
1215
1168 return 0; 1216 return 0;
1169} 1217}
1170 1218
1171static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) 1219static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1172{ 1220{
1173 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 1221 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1222
1174 if (pdata) { 1223 if (pdata) {
1224 cancel_delayed_work(&pdata->carrier_check);
1175 netif_dbg(dev, ifdown, dev->net, "free pdata\n"); 1225 netif_dbg(dev, ifdown, dev->net, "free pdata\n");
1176 kfree(pdata); 1226 kfree(pdata);
1177 pdata = NULL; 1227 pdata = NULL;
@@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
1695 1745
1696 /* do this first to ensure it's cleared even in error case */ 1746 /* do this first to ensure it's cleared even in error case */
1697 pdata->suspend_flags = 0; 1747 pdata->suspend_flags = 0;
1748 schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
1698 1749
1699 if (suspend_flags & SUSPEND_ALLMODES) { 1750 if (suspend_flags & SUSPEND_ALLMODES) {
1700 /* clear wake-up sources */ 1751 /* clear wake-up sources */
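
The smsc95xx hunks add a two-second delayed-work poll of MII_BMSR as a fallback link detector: the work re-arms itself, parks itself while suspend_flags is set, is rescheduled on resume, and is cancelled at unbind. A skeletal self-rearming poller under those assumptions (kernel-style sketch, hypothetical struct):

#include <linux/workqueue.h>

#define POLL_PERIOD (2 * HZ)

struct poll_priv {
        struct delayed_work carrier_check;
        /* ... device state ... */
};

static void carrier_poll(struct work_struct *work)
{
        struct poll_priv *p = container_of(work, struct poll_priv,
                                           carrier_check.work);

        /* read link state from the PHY, update carrier */
        schedule_delayed_work(&p->carrier_check, POLL_PERIOD);  /* re-arm */
}

/* bind:   INIT_DELAYED_WORK(&p->carrier_check, carrier_poll);
 *         schedule_delayed_work(&p->carrier_check, POLL_PERIOD);
 * unbind: cancel_delayed_work(&p->carrier_check);  (as in the patch) */
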
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 49d84e540343..e0638e556fe7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev)
1925 1925
1926 virtio_device_ready(vdev); 1926 virtio_device_ready(vdev);
1927 1927
1928 /* Last of all, set up some receive buffers. */
1929 for (i = 0; i < vi->curr_queue_pairs; i++) {
1930 try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
1931
1932 /* If we didn't even get one input buffer, we're useless. */
1933 if (vi->rq[i].vq->num_free ==
1934 virtqueue_get_vring_size(vi->rq[i].vq)) {
1935 free_unused_bufs(vi);
1936 err = -ENOMEM;
1937 goto free_recv_bufs;
1938 }
1939 }
1940
1941 vi->nb.notifier_call = &virtnet_cpu_callback; 1928 vi->nb.notifier_call = &virtnet_cpu_callback;
1942 err = register_hotcpu_notifier(&vi->nb); 1929 err = register_hotcpu_notifier(&vi->nb);
1943 if (err) { 1930 if (err) {
1944 pr_debug("virtio_net: registering cpu notifier failed\n"); 1931 pr_debug("virtio_net: registering cpu notifier failed\n");
1945 goto free_recv_bufs; 1932 goto free_unregister_netdev;
1946 } 1933 }
1947 1934
1948 /* Assume link up if device can't report link status, 1935 /* Assume link up if device can't report link status,
@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1960 1947
1961 return 0; 1948 return 0;
1962 1949
1963free_recv_bufs: 1950free_unregister_netdev:
1964 vi->vdev->config->reset(vdev); 1951 vi->vdev->config->reset(vdev);
1965 1952
1966 free_receive_bufs(vi);
1967 unregister_netdev(dev); 1953 unregister_netdev(dev);
1968free_vqs: 1954free_vqs:
1969 cancel_delayed_work_sync(&vi->refill); 1955 cancel_delayed_work_sync(&vi->refill);
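
The virtio_net change removes the receive-buffer prefill from probe and renames the error label so it matches what the unwind path still does: reset the device, unregister the netdev, then free the vqs. Naming each goto label after the cleanup it performs is the usual kernel idiom; a compilable plain-C shape with placeholder steps:

int step_a(void); int step_b(void); int step_c(void);
void undo_a(void); void undo_b(void);

int setup(void)
{
        int err;

        err = step_a();
        if (err)
                goto out;
        err = step_b();
        if (err)
                goto unwind_a;          /* label names the cleanup it jumps to */
        err = step_c();
        if (err)
                goto unwind_b;
        return 0;

unwind_b:
        undo_b();
unwind_a:
        undo_a();
out:
        return err;
}
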
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8ff30c3bdfce..f999db2f97b4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
3086 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) 3086 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
3087 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; 3087 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
3088 3088
3089 if (tb[IFLA_MTU])
3090 conf.mtu = nla_get_u32(tb[IFLA_MTU]);
3091
3089 err = vxlan_dev_configure(src_net, dev, &conf); 3092 err = vxlan_dev_configure(src_net, dev, &conf);
3090 switch (err) { 3093 switch (err) {
3091 case -ENODEV: 3094 case -ENODEV:
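
The vxlan fix copies a user-supplied IFLA_MTU into the config before vxlan_dev_configure() runs, so an MTU given at link creation is no longer silently dropped. Optional netlink attributes follow a test-then-read idiom; sketch, assuming tb[] is the already-validated attribute table:

if (tb[IFLA_MTU])                               /* attribute is optional */
        conf.mtu = nla_get_u32(tb[IFLA_MTU]);   /* absent slot keeps the default */
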
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f2d01d4d9364..1b8304e1efaa 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
950 950
951 /* For SPIs, we need to track the affinity per IRQ */ 951 /* For SPIs, we need to track the affinity per IRQ */
952 if (using_spi) { 952 if (using_spi) {
953 if (i >= pdev->num_resources) { 953 if (i >= pdev->num_resources)
954 of_node_put(dn);
955 break; 954 break;
956 }
957 955
958 irqs[i] = cpu; 956 irqs[i] = cpu;
959 } 957 }
960 958
961 /* Keep track of the CPUs containing this PMU type */ 959 /* Keep track of the CPUs containing this PMU type */
962 cpumask_set_cpu(cpu, &pmu->supported_cpus); 960 cpumask_set_cpu(cpu, &pmu->supported_cpus);
963 of_node_put(dn);
964 i++; 961 i++;
965 } while (1); 962 } while (1);
966 963
@@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev,
995 992
996 armpmu_init(pmu); 993 armpmu_init(pmu);
997 994
998 if (!__oprofile_cpu_pmu)
999 __oprofile_cpu_pmu = pmu;
1000
1001 pmu->plat_device = pdev; 995 pmu->plat_device = pdev;
1002 996
1003 if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { 997 if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
@@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
1033 if (ret) 1027 if (ret)
1034 goto out_destroy; 1028 goto out_destroy;
1035 1029
1030 if (!__oprofile_cpu_pmu)
1031 __oprofile_cpu_pmu = pmu;
1032
1036 pr_info("enabled with %s PMU driver, %d counters available\n", 1033 pr_info("enabled with %s PMU driver, %d counters available\n",
1037 pmu->name, pmu->num_events); 1034 pmu->name, pmu->num_events);
1038 1035
@@ -1043,6 +1040,7 @@ out_destroy:
1043out_free: 1040out_free:
1044 pr_info("%s: failed to register PMU devices!\n", 1041 pr_info("%s: failed to register PMU devices!\n",
1045 of_node_full_name(node)); 1042 of_node_full_name(node));
1043 kfree(pmu->irq_affinity);
1046 kfree(pmu); 1044 kfree(pmu);
1047 return ret; 1045 return ret;
1048} 1046}
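
Two arm_pmu error-path fixes are visible above: the __oprofile_cpu_pmu global is now published only after registration succeeds, so a failed probe cannot leave a dangling pointer behind, and the failure path frees pmu->irq_affinity as well as pmu. A sketch of that publish-late, free-everything shape (struct my_pmu and register_pmu() are illustrative, not the driver's API):

struct my_pmu { int *irq_affinity; };
static struct my_pmu *global_pmu;

static int my_probe(struct my_pmu *pmu)
{
        int err;

        /* ... allocate pmu->irq_affinity, parse DT, request IRQs ... */
        err = register_pmu(pmu);                /* assumed registration step */
        if (err)
                goto out_free;

        if (!global_pmu)
                global_pmu = pmu;               /* publish only after success */
        return 0;

out_free:
        kfree(pmu->irq_affinity);               /* undo every allocation */
        kfree(pmu);
        return err;
}
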
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 207b13b618cf..a607655d7830 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
 	const struct mtk_desc_pin *pin;
 
 	chained_irq_enter(chip, desc);
-	for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
+	for (eint_num = 0;
+	     eint_num < pctl->devdata->ap_num;
+	     eint_num += 32, reg += 4) {
 		status = readl(reg);
-		reg += 4;
 		while (status) {
 			offset = __ffs(status);
 			index = eint_num + offset;
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index ccbfc325c778..38faceff2f08 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
854 854
855 clk_enable(nmk_chip->clk); 855 clk_enable(nmk_chip->clk);
856 856
857 dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); 857 dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
858 858
859 clk_disable(nmk_chip->clk); 859 clk_disable(nmk_chip->clk);
860 860
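
The Nomadik one-character fix inverts the reported direction: a set bit in NMK_GPIO_DIR means the pin is an output in this hardware, while gpiolib's get_direction callback is expected to return 1 for input and 0 for output, so the old double negation gave exactly the wrong answer. The fixed read, annotated:

/* DIR bit set => output (hardware convention assumed per this driver) */
dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
/* gpiolib get_direction(): 1 = input, 0 = output */
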
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 579fd65299a0..d637c933c8a9 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 		break;
 
 	case PTP_SYS_OFFSET:
-		sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
-		if (!sysoff) {
-			err = -ENOMEM;
-			break;
-		}
-		if (copy_from_user(sysoff, (void __user *)arg,
-				   sizeof(*sysoff))) {
-			err = -EFAULT;
+		sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
+		if (IS_ERR(sysoff)) {
+			err = PTR_ERR(sysoff);
+			sysoff = NULL;
 			break;
 		}
 		if (sysoff->n_samples > PTP_MAX_SAMPLES) {
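
memdup_user() collapses the kmalloc()-plus-copy_from_user() pair into one call that returns either the filled buffer or an ERR_PTR encoding the error. Note why the patch also NULLs the pointer on failure: this ioctl's shared exit path kfree()s sysoff, and passing an ERR_PTR value to kfree() would be a bug. The consumption pattern, mirroring the hunk:

sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
if (IS_ERR(sysoff)) {
        err = PTR_ERR(sysoff);
        sysoff = NULL;   /* shared exit kfree()s sysoff; ERR_PTR must not reach kfree() */
        break;
}
/* ... validate and use sysoff; the common exit does kfree(sysoff),
 * and kfree(NULL) is a harmless no-op ... */
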
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 8f90d9e77104..969c312de1be 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -621,6 +621,11 @@ struct aac_driver_ident
621#define AAC_QUIRK_SCSI_32 0x0020 621#define AAC_QUIRK_SCSI_32 0x0020
622 622
623/* 623/*
624 * SRC based adapters support the AifReqEvent functions
625 */
626#define AAC_QUIRK_SRC 0x0040
627
628/*
624 * The adapter interface specs all queues to be located in the same 629 * The adapter interface specs all queues to be located in the same
625 * physically contiguous block. The host structure that defines the 630 * physically contiguous block. The host structure that defines the
626 * commuication queues will assume they are each a separate physically 631 * commuication queues will assume they are each a separate physically
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a943bd230bc2..79871f3519ff 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = {
236 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ 236 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
237 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ 237 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
238 { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ 238 { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
239 { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */ 239 { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
240 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */ 240 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
241 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */ 241 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
242 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */ 242 { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
243}; 243};
244 244
245/** 245/**
@@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1299 else 1299 else
1300 shost->this_id = shost->max_id; 1300 shost->this_id = shost->max_id;
1301 1301
1302 aac_intr_normal(aac, 0, 2, 0, NULL); 1302 if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
1303 aac_intr_normal(aac, 0, 2, 0, NULL);
1303 1304
1304 /* 1305 /*
1305 * dmb - we may need to move the setting of these parms somewhere else once 1306 * dmb - we may need to move the setting of these parms somewhere else once
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6a4df5a315e9..6bff13e7afc7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
7975 ActiveCableEventData = 7975 ActiveCableEventData =
7976 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; 7976 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
7977 if (ActiveCableEventData->ReasonCode == 7977 if (ActiveCableEventData->ReasonCode ==
7978 MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) 7978 MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
7979 pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", 7979 pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
7980 ioc->name, ActiveCableEventData->ReceptacleID); 7980 ioc->name, ActiveCableEventData->ReceptacleID);
7981 pr_info("cannot be powered and devices connected to this active cable"); 7981 pr_info("cannot be powered and devices connected to this active cable");
7982 pr_info("will not be seen. This active cable"); 7982 pr_info("will not be seen. This active cable");
7983 pr_info("requires %d mW of power", 7983 pr_info("requires %d mW of power",
7984 ActiveCableEventData->ActiveCablePowerRequirement); 7984 ActiveCableEventData->ActiveCablePowerRequirement);
7985 }
7985 break; 7986 break;
7986 7987
7987 default: /* ignore the rest */ 7988 default: /* ignore the rest */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b2e332af0f51..c71344aebdbb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	}
 
 	/*
-	 * If we finished all bytes in the request we are done now.
+	 * special case: failed zero length commands always need to
+	 * drop down into the retry code. Otherwise, if we finished
+	 * all bytes in the request we are done now.
 	 */
-	if (!scsi_end_request(req, error, good_bytes, 0))
+	if (!(blk_rq_bytes(req) == 0 && error) &&
+	    !scsi_end_request(req, error, good_bytes, 0))
 		return;
 
 	/*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 428c03ef02b2..f459dff30512 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp,
1398 **/ 1398 **/
1399static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) 1399static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1400{ 1400{
1401 struct scsi_disk *sdkp = scsi_disk(disk); 1401 struct scsi_disk *sdkp = scsi_disk_get(disk);
1402 struct scsi_device *sdp = sdkp->device; 1402 struct scsi_device *sdp;
1403 struct scsi_sense_hdr *sshdr = NULL; 1403 struct scsi_sense_hdr *sshdr = NULL;
1404 int retval; 1404 int retval;
1405 1405
1406 if (!sdkp)
1407 return 0;
1408
1409 sdp = sdkp->device;
1406 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); 1410 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
1407 1411
1408 /* 1412 /*
@@ -1459,6 +1463,7 @@ out:
1459 kfree(sshdr); 1463 kfree(sshdr);
1460 retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; 1464 retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1461 sdp->changed = 0; 1465 sdp->changed = 0;
1466 scsi_disk_put(sdkp);
1462 return retval; 1467 return retval;
1463} 1468}
1464 1469
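
sd_check_events() can race with device removal, so the fix takes a counted reference with scsi_disk_get() -- which returns NULL once teardown has begun -- and drops it on every exit before touching sdkp->device. The get/use/put shape, as a kernel-style sketch:

static unsigned int check_events(struct gendisk *disk, unsigned int clearing)
{
        struct scsi_disk *sdkp = scsi_disk_get(disk);  /* NULL during teardown */
        unsigned int ret = 0;

        if (!sdkp)
                return 0;
        /* ... sdkp and sdkp->device are safe to dereference here ... */
        scsi_disk_put(sdkp);    /* paired with the get on every exit path */
        return ret;
}
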
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index b56885c14839..ebb34dca60df 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -68,7 +68,8 @@ struct sync_timeline {
68 68
69 /* protected by child_list_lock */ 69 /* protected by child_list_lock */
70 bool destroyed; 70 bool destroyed;
71 int context, value; 71 u64 context;
72 int value;
72 73
73 struct list_head child_list_head; 74 struct list_head child_list_head;
74 spinlock_t child_list_lock; 75 spinlock_t child_list_lock;
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
index 13d431cbd29e..a578cd257db4 100644
--- a/drivers/thermal/int340x_thermal/int3406_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev)
177 return -ENODEV; 177 return -ENODEV;
178 d->raw_bd = bd; 178 d->raw_bd = bd;
179 179
180 ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br); 180 ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL);
181 if (ret) 181 if (ret)
182 return ret; 182 return ret;
183 183
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 82c4d2e45319..95103054c0e4 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -120,17 +120,6 @@ config UNIX98_PTYS
120 All modern Linux systems use the Unix98 ptys. Say Y unless 120 All modern Linux systems use the Unix98 ptys. Say Y unless
121 you're on an embedded system and want to conserve memory. 121 you're on an embedded system and want to conserve memory.
122 122
123config DEVPTS_MULTIPLE_INSTANCES
124 bool "Support multiple instances of devpts"
125 depends on UNIX98_PTYS
126 default n
127 ---help---
128 Enable support for multiple instances of devpts filesystem.
129 If you want to have isolated PTY namespaces (eg: in containers),
130 say Y here. Otherwise, say N. If enabled, each mount of devpts
131 filesystem with the '-o newinstance' option will create an
132 independent PTY namespace.
133
134config LEGACY_PTYS 123config LEGACY_PTYS
135 bool "Legacy (BSD) PTY support" 124 bool "Legacy (BSD) PTY support"
136 default y 125 default y
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index dd4b8417e7f4..f856c4544eea 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -668,7 +668,7 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
668 else 668 else
669 fsi = tty->link->driver_data; 669 fsi = tty->link->driver_data;
670 devpts_kill_index(fsi, tty->index); 670 devpts_kill_index(fsi, tty->index);
671 devpts_put_ref(fsi); 671 devpts_release(fsi);
672} 672}
673 673
674static const struct tty_operations ptm_unix98_ops = { 674static const struct tty_operations ptm_unix98_ops = {
@@ -733,10 +733,11 @@ static int ptmx_open(struct inode *inode, struct file *filp)
733 if (retval) 733 if (retval)
734 return retval; 734 return retval;
735 735
736 fsi = devpts_get_ref(inode, filp); 736 fsi = devpts_acquire(filp);
737 retval = -ENODEV; 737 if (IS_ERR(fsi)) {
738 if (!fsi) 738 retval = PTR_ERR(fsi);
739 goto out_free_file; 739 goto out_free_file;
740 }
740 741
741 /* find a device that is not in use. */ 742 /* find a device that is not in use. */
742 mutex_lock(&devpts_mutex); 743 mutex_lock(&devpts_mutex);
@@ -745,7 +746,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
745 746
746 retval = index; 747 retval = index;
747 if (index < 0) 748 if (index < 0)
748 goto out_put_ref; 749 goto out_put_fsi;
749 750
750 751
751 mutex_lock(&tty_mutex); 752 mutex_lock(&tty_mutex);
@@ -789,8 +790,8 @@ err_release:
789 return retval; 790 return retval;
790out: 791out:
791 devpts_kill_index(fsi, index); 792 devpts_kill_index(fsi, index);
792out_put_ref: 793out_put_fsi:
793 devpts_put_ref(fsi); 794 devpts_release(fsi);
794out_free_file: 795out_free_file:
795 tty_free_file(filp); 796 tty_free_file(filp);
796 return retval; 797 return retval;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 93601407dab8..688691d9058d 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -749,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
 		if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
 			return count;
 	} else {
-		if (pci_read_vpd(pdev, addr, 4, &data) != 4)
+		data = 0;
+		if (pci_read_vpd(pdev, addr, 4, &data) < 0)
 			return count;
 		*pdata = cpu_to_le32(data);
 	}
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index e9ea3fef144a..15ecfc9c5f6c 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
228 228
229static void vfio_intx_disable(struct vfio_pci_device *vdev) 229static void vfio_intx_disable(struct vfio_pci_device *vdev)
230{ 230{
231 vfio_intx_set_signal(vdev, -1);
232 vfio_virqfd_disable(&vdev->ctx[0].unmask); 231 vfio_virqfd_disable(&vdev->ctx[0].unmask);
233 vfio_virqfd_disable(&vdev->ctx[0].mask); 232 vfio_virqfd_disable(&vdev->ctx[0].mask);
233 vfio_intx_set_signal(vdev, -1);
234 vdev->irq_type = VFIO_PCI_NUM_IRQS; 234 vdev->irq_type = VFIO_PCI_NUM_IRQS;
235 vdev->num_ctx = 0; 235 vdev->num_ctx = 0;
236 kfree(vdev->ctx); 236 kfree(vdev->ctx);
@@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
401 struct pci_dev *pdev = vdev->pdev; 401 struct pci_dev *pdev = vdev->pdev;
402 int i; 402 int i;
403 403
404 vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
405
406 for (i = 0; i < vdev->num_ctx; i++) { 404 for (i = 0; i < vdev->num_ctx; i++) {
407 vfio_virqfd_disable(&vdev->ctx[i].unmask); 405 vfio_virqfd_disable(&vdev->ctx[i].unmask);
408 vfio_virqfd_disable(&vdev->ctx[i].mask); 406 vfio_virqfd_disable(&vdev->ctx[i].mask);
409 } 407 }
410 408
409 vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
410
411 if (msix) { 411 if (msix) {
412 pci_disable_msix(vdev->pdev); 412 pci_disable_msix(vdev->pdev);
413 kfree(vdev->msix); 413 kfree(vdev->msix);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 15a65823aad9..2ba19424e4a1 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
515 unsigned long pfn, long npage, int prot) 515 unsigned long pfn, long npage, int prot)
516{ 516{
517 long i; 517 long i;
518 int ret; 518 int ret = 0;
519 519
520 for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { 520 for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
521 ret = iommu_map(domain->domain, iova, 521 ret = iommu_map(domain->domain, iova,
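
The vfio_iommu_type1 one-liner fixes an uninitialized return: ret was only assigned inside the loop body, so a call with npage == 0 returned stack garbage. Initializing ret to 0 makes the empty range a successful no-op. A compilable illustration:

int do_map(long i);

int map_range(long npage)
{
        long i;
        int ret = 0;    /* without the initializer, npage == 0 returns garbage */

        for (i = 0; i < npage; i++) {
                ret = do_map(i);
                if (ret)
                        break;
        }
        return ret;
}
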
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
index 8511c648a15c..9d78411a3bf7 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
@@ -14,7 +14,7 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/of.h> 15#include <linux/of.h>
16 16
17#include <video/omapdss.h> 17#include <video/omapfb_dss.h>
18#include <video/omap-panel-data.h> 18#include <video/omap-panel-data.h>
19 19
20struct panel_drv_data { 20struct panel_drv_data {
@@ -25,7 +25,6 @@ struct panel_drv_data {
25 25
26 struct omap_video_timings timings; 26 struct omap_video_timings timings;
27 27
28 enum omap_dss_venc_type connector_type;
29 bool invert_polarity; 28 bool invert_polarity;
30}; 29};
31 30
@@ -45,10 +44,6 @@ static const struct omap_video_timings tvc_pal_timings = {
45 44
46static const struct of_device_id tvc_of_match[]; 45static const struct of_device_id tvc_of_match[];
47 46
48struct tvc_of_data {
49 enum omap_dss_venc_type connector_type;
50};
51
52#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) 47#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
53 48
54static int tvc_connect(struct omap_dss_device *dssdev) 49static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +94,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
99 in->ops.atv->set_timings(in, &ddata->timings); 94 in->ops.atv->set_timings(in, &ddata->timings);
100 95
101 if (!ddata->dev->of_node) { 96 if (!ddata->dev->of_node) {
102 in->ops.atv->set_type(in, ddata->connector_type); 97 in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
103 98
104 in->ops.atv->invert_vid_out_polarity(in, 99 in->ops.atv->invert_vid_out_polarity(in,
105 ddata->invert_polarity); 100 ddata->invert_polarity);
@@ -207,7 +202,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
207 202
208 ddata->in = in; 203 ddata->in = in;
209 204
210 ddata->connector_type = pdata->connector_type;
211 ddata->invert_polarity = pdata->invert_polarity; 205 ddata->invert_polarity = pdata->invert_polarity;
212 206
213 dssdev = &ddata->dssdev; 207 dssdev = &ddata->dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
index d811e6dcaef7..06e1db34541e 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
@@ -16,8 +16,7 @@
16 16
17#include <drm/drm_edid.h> 17#include <drm/drm_edid.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h>
21 20
22static const struct omap_video_timings dvic_default_timings = { 21static const struct omap_video_timings dvic_default_timings = {
23 .x_res = 640, 22 .x_res = 640,
@@ -236,46 +235,6 @@ static struct omap_dss_driver dvic_driver = {
236 .detect = dvic_detect, 235 .detect = dvic_detect,
237}; 236};
238 237
239static int dvic_probe_pdata(struct platform_device *pdev)
240{
241 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
242 struct connector_dvi_platform_data *pdata;
243 struct omap_dss_device *in, *dssdev;
244 int i2c_bus_num;
245
246 pdata = dev_get_platdata(&pdev->dev);
247 i2c_bus_num = pdata->i2c_bus_num;
248
249 if (i2c_bus_num != -1) {
250 struct i2c_adapter *adapter;
251
252 adapter = i2c_get_adapter(i2c_bus_num);
253 if (!adapter) {
254 dev_err(&pdev->dev,
255 "Failed to get I2C adapter, bus %d\n",
256 i2c_bus_num);
257 return -EPROBE_DEFER;
258 }
259
260 ddata->i2c_adapter = adapter;
261 }
262
263 in = omap_dss_find_output(pdata->source);
264 if (in == NULL) {
265 i2c_put_adapter(ddata->i2c_adapter);
266
267 dev_err(&pdev->dev, "Failed to find video source\n");
268 return -EPROBE_DEFER;
269 }
270
271 ddata->in = in;
272
273 dssdev = &ddata->dssdev;
274 dssdev->name = pdata->name;
275
276 return 0;
277}
278
279static int dvic_probe_of(struct platform_device *pdev) 238static int dvic_probe_of(struct platform_device *pdev)
280{ 239{
281 struct panel_drv_data *ddata = platform_get_drvdata(pdev); 240 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -313,23 +272,18 @@ static int dvic_probe(struct platform_device *pdev)
313 struct omap_dss_device *dssdev; 272 struct omap_dss_device *dssdev;
314 int r; 273 int r;
315 274
275 if (!pdev->dev.of_node)
276 return -ENODEV;
277
316 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 278 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
317 if (!ddata) 279 if (!ddata)
318 return -ENOMEM; 280 return -ENOMEM;
319 281
320 platform_set_drvdata(pdev, ddata); 282 platform_set_drvdata(pdev, ddata);
321 283
322 if (dev_get_platdata(&pdev->dev)) { 284 r = dvic_probe_of(pdev);
323 r = dvic_probe_pdata(pdev); 285 if (r)
324 if (r) 286 return r;
325 return r;
326 } else if (pdev->dev.of_node) {
327 r = dvic_probe_of(pdev);
328 if (r)
329 return r;
330 } else {
331 return -ENODEV;
332 }
333 287
334 ddata->timings = dvic_default_timings; 288 ddata->timings = dvic_default_timings;
335 289
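
This connector driver, like the other omapfb display drivers in the hunks that follow, drops its platform-data probe path entirely: probe now bails out unless the device came from the device tree, and the *_probe_pdata() helpers are deleted. The resulting DT-only probe skeleton is uniform across these drivers; a sketch of the pattern (xxx_* names are placeholders, not a real driver):

static int xxx_probe(struct platform_device *pdev)
{
        struct panel_drv_data *ddata;
        int r;

        if (!pdev->dev.of_node)
                return -ENODEV;          /* DT-only from here on */

        ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
        if (!ddata)
                return -ENOMEM;
        platform_set_drvdata(pdev, ddata);

        r = xxx_probe_of(pdev);          /* the only supported probe source */
        if (r)
                return r;
        /* ... default timings, gpios, registration ... */
        return 0;
}
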
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
index 6ee4129bc0c0..58d5803ede67 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
@@ -17,8 +17,7 @@
17 17
18#include <drm/drm_edid.h> 18#include <drm/drm_edid.h>
19 19
20#include <video/omapdss.h> 20#include <video/omapfb_dss.h>
21#include <video/omap-panel-data.h>
22 21
23static const struct omap_video_timings hdmic_default_timings = { 22static const struct omap_video_timings hdmic_default_timings = {
24 .x_res = 640, 23 .x_res = 640,
@@ -206,30 +205,6 @@ static struct omap_dss_driver hdmic_driver = {
206 .set_hdmi_infoframe = hdmic_set_infoframe, 205 .set_hdmi_infoframe = hdmic_set_infoframe,
207}; 206};
208 207
209static int hdmic_probe_pdata(struct platform_device *pdev)
210{
211 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
212 struct connector_hdmi_platform_data *pdata;
213 struct omap_dss_device *in, *dssdev;
214
215 pdata = dev_get_platdata(&pdev->dev);
216
217 ddata->hpd_gpio = -ENODEV;
218
219 in = omap_dss_find_output(pdata->source);
220 if (in == NULL) {
221 dev_err(&pdev->dev, "Failed to find video source\n");
222 return -EPROBE_DEFER;
223 }
224
225 ddata->in = in;
226
227 dssdev = &ddata->dssdev;
228 dssdev->name = pdata->name;
229
230 return 0;
231}
232
233static int hdmic_probe_of(struct platform_device *pdev) 208static int hdmic_probe_of(struct platform_device *pdev)
234{ 209{
235 struct panel_drv_data *ddata = platform_get_drvdata(pdev); 210 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -261,6 +236,9 @@ static int hdmic_probe(struct platform_device *pdev)
261 struct omap_dss_device *dssdev; 236 struct omap_dss_device *dssdev;
262 int r; 237 int r;
263 238
239 if (!pdev->dev.of_node)
240 return -ENODEV;
241
264 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 242 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
265 if (!ddata) 243 if (!ddata)
266 return -ENOMEM; 244 return -ENOMEM;
@@ -268,17 +246,9 @@ static int hdmic_probe(struct platform_device *pdev)
268 platform_set_drvdata(pdev, ddata); 246 platform_set_drvdata(pdev, ddata);
269 ddata->dev = &pdev->dev; 247 ddata->dev = &pdev->dev;
270 248
271 if (dev_get_platdata(&pdev->dev)) { 249 r = hdmic_probe_of(pdev);
272 r = hdmic_probe_pdata(pdev); 250 if (r)
273 if (r) 251 return r;
274 return r;
275 } else if (pdev->dev.of_node) {
276 r = hdmic_probe_of(pdev);
277 if (r)
278 return r;
279 } else {
280 return -ENODEV;
281 }
282 252
283 if (gpio_is_valid(ddata->hpd_gpio)) { 253 if (gpio_is_valid(ddata->hpd_gpio)) {
284 r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, 254 r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
index 8c246c213e06..a9a67167cc3d 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
@@ -20,7 +20,7 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
22 22
23#include <video/omapdss.h> 23#include <video/omapfb_dss.h>
24 24
25struct panel_drv_data { 25struct panel_drv_data {
26 struct omap_dss_device dssdev; 26 struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
index d9048b3df495..8c0953d069b7 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
@@ -15,8 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/of_gpio.h> 16#include <linux/of_gpio.h>
17 17
18#include <video/omapdss.h> 18#include <video/omapfb_dss.h>
19#include <video/omap-panel-data.h>
20 19
21struct panel_drv_data { 20struct panel_drv_data {
22 struct omap_dss_device dssdev; 21 struct omap_dss_device dssdev;
@@ -166,32 +165,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = {
166 .get_timings = tfp410_get_timings, 165 .get_timings = tfp410_get_timings,
167}; 166};
168 167
169static int tfp410_probe_pdata(struct platform_device *pdev)
170{
171 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
172 struct encoder_tfp410_platform_data *pdata;
173 struct omap_dss_device *dssdev, *in;
174
175 pdata = dev_get_platdata(&pdev->dev);
176
177 ddata->pd_gpio = pdata->power_down_gpio;
178
179 ddata->data_lines = pdata->data_lines;
180
181 in = omap_dss_find_output(pdata->source);
182 if (in == NULL) {
183 dev_err(&pdev->dev, "Failed to find video source\n");
184 return -ENODEV;
185 }
186
187 ddata->in = in;
188
189 dssdev = &ddata->dssdev;
190 dssdev->name = pdata->name;
191
192 return 0;
193}
194
195static int tfp410_probe_of(struct platform_device *pdev) 168static int tfp410_probe_of(struct platform_device *pdev)
196{ 169{
197 struct panel_drv_data *ddata = platform_get_drvdata(pdev); 170 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -225,23 +198,18 @@ static int tfp410_probe(struct platform_device *pdev)
225 struct omap_dss_device *dssdev; 198 struct omap_dss_device *dssdev;
226 int r; 199 int r;
227 200
201 if (!pdev->dev.of_node)
202 return -ENODEV;
203
228 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 204 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
229 if (!ddata) 205 if (!ddata)
230 return -ENOMEM; 206 return -ENOMEM;
231 207
232 platform_set_drvdata(pdev, ddata); 208 platform_set_drvdata(pdev, ddata);
233 209
234 if (dev_get_platdata(&pdev->dev)) { 210 r = tfp410_probe_of(pdev);
235 r = tfp410_probe_pdata(pdev); 211 if (r)
236 if (r) 212 return r;
237 return r;
238 } else if (pdev->dev.of_node) {
239 r = tfp410_probe_of(pdev);
240 if (r)
241 return r;
242 } else {
243 return -ENODEV;
244 }
245 213
246 if (gpio_is_valid(ddata->pd_gpio)) { 214 if (gpio_is_valid(ddata->pd_gpio)) {
247 r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio, 215 r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
index 677e2545fcbe..80dc47347e21 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/gpio/consumer.h> 17#include <linux/gpio/consumer.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h>
21 20
22struct panel_drv_data { 21struct panel_drv_data {
23 struct omap_dss_device dssdev; 22 struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
index e780fd4f8b46..ace3d818afe5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
@@ -16,7 +16,7 @@
16#include <linux/of.h> 16#include <linux/of.h>
17#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h> 20#include <video/omap-panel-data.h>
21#include <video/of_display_timing.h> 21#include <video/of_display_timing.h>
22 22
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 3414c2609320..b58012b82b6f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -25,8 +25,7 @@
25#include <linux/of_device.h> 25#include <linux/of_device.h>
26#include <linux/of_gpio.h> 26#include <linux/of_gpio.h>
27 27
28#include <video/omapdss.h> 28#include <video/omapfb_dss.h>
29#include <video/omap-panel-data.h>
30#include <video/mipi_display.h> 29#include <video/mipi_display.h>
31 30
32/* DSI Virtual channel. Hardcoded for now. */ 31/* DSI Virtual channel. Hardcoded for now. */
@@ -1127,40 +1126,6 @@ static struct omap_dss_driver dsicm_ops = {
1127 .memory_read = dsicm_memory_read, 1126 .memory_read = dsicm_memory_read,
1128}; 1127};
1129 1128
1130static int dsicm_probe_pdata(struct platform_device *pdev)
1131{
1132 const struct panel_dsicm_platform_data *pdata;
1133 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
1134 struct omap_dss_device *dssdev, *in;
1135
1136 pdata = dev_get_platdata(&pdev->dev);
1137
1138 in = omap_dss_find_output(pdata->source);
1139 if (in == NULL) {
1140 dev_err(&pdev->dev, "failed to find video source\n");
1141 return -EPROBE_DEFER;
1142 }
1143 ddata->in = in;
1144
1145 ddata->reset_gpio = pdata->reset_gpio;
1146
1147 if (pdata->use_ext_te)
1148 ddata->ext_te_gpio = pdata->ext_te_gpio;
1149 else
1150 ddata->ext_te_gpio = -1;
1151
1152 ddata->ulps_timeout = pdata->ulps_timeout;
1153
1154 ddata->use_dsi_backlight = pdata->use_dsi_backlight;
1155
1156 ddata->pin_config = pdata->pin_config;
1157
1158 dssdev = &ddata->dssdev;
1159 dssdev->name = pdata->name;
1160
1161 return 0;
1162}
1163
1164static int dsicm_probe_of(struct platform_device *pdev) 1129static int dsicm_probe_of(struct platform_device *pdev)
1165{ 1130{
1166 struct device_node *node = pdev->dev.of_node; 1131 struct device_node *node = pdev->dev.of_node;
@@ -1207,6 +1172,9 @@ static int dsicm_probe(struct platform_device *pdev)
1207 1172
1208 dev_dbg(dev, "probe\n"); 1173 dev_dbg(dev, "probe\n");
1209 1174
1175 if (!pdev->dev.of_node)
1176 return -ENODEV;
1177
1210 ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); 1178 ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
1211 if (!ddata) 1179 if (!ddata)
1212 return -ENOMEM; 1180 return -ENOMEM;
@@ -1214,17 +1182,9 @@ static int dsicm_probe(struct platform_device *pdev)
1214 platform_set_drvdata(pdev, ddata); 1182 platform_set_drvdata(pdev, ddata);
1215 ddata->pdev = pdev; 1183 ddata->pdev = pdev;
1216 1184
1217 if (dev_get_platdata(dev)) { 1185 r = dsicm_probe_of(pdev);
1218 r = dsicm_probe_pdata(pdev); 1186 if (r)
1219 if (r) 1187 return r;
1220 return r;
1221 } else if (pdev->dev.of_node) {
1222 r = dsicm_probe_of(pdev);
1223 if (r)
1224 return r;
1225 } else {
1226 return -ENODEV;
1227 }
1228 1188
1229 ddata->timings.x_res = 864; 1189 ddata->timings.x_res = 864;
1230 ddata->timings.y_res = 480; 1190 ddata->timings.y_res = 480;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
index 18eb60e9c9ec..f14691ce8d02 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
@@ -16,8 +16,7 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/gpio.h> 17#include <linux/gpio.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h>
21 20
22static struct omap_video_timings lb035q02_timings = { 21static struct omap_video_timings lb035q02_timings = {
23 .x_res = 320, 22 .x_res = 320,
@@ -240,44 +239,6 @@ static struct omap_dss_driver lb035q02_ops = {
240 .get_resolution = omapdss_default_get_resolution, 239 .get_resolution = omapdss_default_get_resolution,
241}; 240};
242 241
243static int lb035q02_probe_pdata(struct spi_device *spi)
244{
245 const struct panel_lb035q02_platform_data *pdata;
246 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
247 struct omap_dss_device *dssdev, *in;
248 int r;
249
250 pdata = dev_get_platdata(&spi->dev);
251
252 in = omap_dss_find_output(pdata->source);
253 if (in == NULL) {
254 dev_err(&spi->dev, "failed to find video source '%s'\n",
255 pdata->source);
256 return -EPROBE_DEFER;
257 }
258
259 ddata->in = in;
260
261 ddata->data_lines = pdata->data_lines;
262
263 dssdev = &ddata->dssdev;
264 dssdev->name = pdata->name;
265
266 r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio,
267 GPIOF_OUT_INIT_LOW, "panel enable");
268 if (r)
269 goto err_gpio;
270
271 ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio);
272
273 ddata->backlight_gpio = pdata->backlight_gpio;
274
275 return 0;
276err_gpio:
277 omap_dss_put_device(ddata->in);
278 return r;
279}
280
281static int lb035q02_probe_of(struct spi_device *spi) 242static int lb035q02_probe_of(struct spi_device *spi)
282{ 243{
283 struct device_node *node = spi->dev.of_node; 244 struct device_node *node = spi->dev.of_node;
@@ -312,6 +273,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
312 struct omap_dss_device *dssdev; 273 struct omap_dss_device *dssdev;
313 int r; 274 int r;
314 275
276 if (!spi->dev.of_node)
277 return -ENODEV;
278
315 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 279 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
316 if (ddata == NULL) 280 if (ddata == NULL)
317 return -ENOMEM; 281 return -ENOMEM;
@@ -320,17 +284,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
320 284
321 ddata->spi = spi; 285 ddata->spi = spi;
322 286
323 if (dev_get_platdata(&spi->dev)) { 287 r = lb035q02_probe_of(spi);
324 r = lb035q02_probe_pdata(spi); 288 if (r)
325 if (r) 289 return r;
326 return r;
327 } else if (spi->dev.of_node) {
328 r = lb035q02_probe_of(spi);
329 if (r)
330 return r;
331 } else {
332 return -ENODEV;
333 }
334 290
335 if (gpio_is_valid(ddata->backlight_gpio)) { 291 if (gpio_is_valid(ddata->backlight_gpio)) {
336 r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, 292 r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
index 8a928c9a2fc9..a2cbadd3eca3 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
@@ -18,8 +18,7 @@
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of_gpio.h> 19#include <linux/of_gpio.h>
20 20
21#include <video/omapdss.h> 21#include <video/omapfb_dss.h>
22#include <video/omap-panel-data.h>
23 22
24struct panel_drv_data { 23struct panel_drv_data {
25 struct omap_dss_device dssdev; 24 struct omap_dss_device dssdev;
@@ -233,33 +232,6 @@ static struct omap_dss_driver nec_8048_ops = {
233}; 232};
234 233
235 234
236static int nec_8048_probe_pdata(struct spi_device *spi)
237{
238 const struct panel_nec_nl8048hl11_platform_data *pdata;
239 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
240 struct omap_dss_device *dssdev, *in;
241
242 pdata = dev_get_platdata(&spi->dev);
243
244 ddata->qvga_gpio = pdata->qvga_gpio;
245 ddata->res_gpio = pdata->res_gpio;
246
247 in = omap_dss_find_output(pdata->source);
248 if (in == NULL) {
249 dev_err(&spi->dev, "failed to find video source '%s'\n",
250 pdata->source);
251 return -EPROBE_DEFER;
252 }
253 ddata->in = in;
254
255 ddata->data_lines = pdata->data_lines;
256
257 dssdev = &ddata->dssdev;
258 dssdev->name = pdata->name;
259
260 return 0;
261}
262
263static int nec_8048_probe_of(struct spi_device *spi) 235static int nec_8048_probe_of(struct spi_device *spi)
264{ 236{
265 struct device_node *node = spi->dev.of_node; 237 struct device_node *node = spi->dev.of_node;
@@ -296,6 +268,9 @@ static int nec_8048_probe(struct spi_device *spi)
296 268
297 dev_dbg(&spi->dev, "%s\n", __func__); 269 dev_dbg(&spi->dev, "%s\n", __func__);
298 270
271 if (!spi->dev.of_node)
272 return -ENODEV;
273
299 spi->mode = SPI_MODE_0; 274 spi->mode = SPI_MODE_0;
300 spi->bits_per_word = 32; 275 spi->bits_per_word = 32;
301 276
@@ -315,17 +290,9 @@ static int nec_8048_probe(struct spi_device *spi)
315 290
316 ddata->spi = spi; 291 ddata->spi = spi;
317 292
318 if (dev_get_platdata(&spi->dev)) { 293 r = nec_8048_probe_of(spi);
319 r = nec_8048_probe_pdata(spi); 294 if (r)
320 if (r) 295 return r;
321 return r;
322 } else if (spi->dev.of_node) {
323 r = nec_8048_probe_of(spi);
324 if (r)
325 return r;
326 } else {
327 return -ENODEV;
328 }
329 296
330 if (gpio_is_valid(ddata->qvga_gpio)) { 297 if (gpio_is_valid(ddata->qvga_gpio)) {
331 r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio, 298 r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index 1954ec913ce5..a8be18a87fa0 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -17,8 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/regulator/consumer.h> 19#include <linux/regulator/consumer.h>
20#include <video/omapdss.h> 20#include <video/omapfb_dss.h>
21#include <video/omap-panel-data.h>
22 21
23struct panel_drv_data { 22struct panel_drv_data {
24 struct omap_dss_device dssdev; 23 struct omap_dss_device dssdev;
@@ -197,69 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = {
197 .get_resolution = omapdss_default_get_resolution, 196 .get_resolution = omapdss_default_get_resolution,
198}; 197};
199 198
200static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
201 char *desc, struct gpio_desc **gpiod)
202{
203 int r;
204
205 r = devm_gpio_request_one(dev, gpio, flags, desc);
206 if (r) {
207 *gpiod = NULL;
208 return r == -ENOENT ? 0 : r;
209 }
210
211 *gpiod = gpio_to_desc(gpio);
212
213 return 0;
214}
215
216static int sharp_ls_probe_pdata(struct platform_device *pdev)
217{
218 const struct panel_sharp_ls037v7dw01_platform_data *pdata;
219 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
220 struct omap_dss_device *dssdev, *in;
221 int r;
222
223 pdata = dev_get_platdata(&pdev->dev);
224
225 in = omap_dss_find_output(pdata->source);
226 if (in == NULL) {
227 dev_err(&pdev->dev, "failed to find video source '%s'\n",
228 pdata->source);
229 return -EPROBE_DEFER;
230 }
231
232 ddata->in = in;
233
234 ddata->data_lines = pdata->data_lines;
235
236 dssdev = &ddata->dssdev;
237 dssdev->name = pdata->name;
238
239 r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW,
240 "lcd MO", &ddata->mo_gpio);
241 if (r)
242 return r;
243 r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH,
244 "lcd LR", &ddata->lr_gpio);
245 if (r)
246 return r;
247 r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH,
248 "lcd UD", &ddata->ud_gpio);
249 if (r)
250 return r;
251 r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW,
252 "lcd RESB", &ddata->resb_gpio);
253 if (r)
254 return r;
255 r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW,
256 "lcd INI", &ddata->ini_gpio);
257 if (r)
258 return r;
259
260 return 0;
261}
262
263static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, 199static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
264 const char *desc, struct gpio_desc **gpiod) 200 const char *desc, struct gpio_desc **gpiod)
265{ 201{
@@ -330,23 +266,18 @@ static int sharp_ls_probe(struct platform_device *pdev)
330 struct omap_dss_device *dssdev; 266 struct omap_dss_device *dssdev;
331 int r; 267 int r;
332 268
269 if (!pdev->dev.of_node)
270 return -ENODEV;
271
333 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 272 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
334 if (ddata == NULL) 273 if (ddata == NULL)
335 return -ENOMEM; 274 return -ENOMEM;
336 275
337 platform_set_drvdata(pdev, ddata); 276 platform_set_drvdata(pdev, ddata);
338 277
339 if (dev_get_platdata(&pdev->dev)) { 278 r = sharp_ls_probe_of(pdev);
340 r = sharp_ls_probe_pdata(pdev); 279 if (r)
341 if (r) 280 return r;
342 return r;
343 } else if (pdev->dev.of_node) {
344 r = sharp_ls_probe_of(pdev);
345 if (r)
346 return r;
347 } else {
348 return -ENODEV;
349 }
350 281
351 ddata->videomode = sharp_ls_timings; 282 ddata->videomode = sharp_ls_timings;
352 283
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 31efcca801bd..468560a6daae 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -33,7 +33,7 @@
33#include <linux/of.h> 33#include <linux/of.h>
34#include <linux/of_gpio.h> 34#include <linux/of_gpio.h>
35 35
36#include <video/omapdss.h> 36#include <video/omapfb_dss.h>
37#include <video/omap-panel-data.h> 37#include <video/omap-panel-data.h>
38 38
39#define MIPID_CMD_READ_DISP_ID 0x04 39#define MIPID_CMD_READ_DISP_ID 0x04
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
index 4d657f3ab679..b529a8c2b652 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
@@ -28,8 +28,7 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31#include <video/omapdss.h> 31#include <video/omapfb_dss.h>
32#include <video/omap-panel-data.h>
33 32
34struct panel_drv_data { 33struct panel_drv_data {
35 struct omap_dss_device dssdev; 34 struct omap_dss_device dssdev;
@@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = {
365 .check_timings = td028ttec1_panel_check_timings, 364 .check_timings = td028ttec1_panel_check_timings,
366}; 365};
367 366
368static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
369{
370 const struct panel_tpo_td028ttec1_platform_data *pdata;
371 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
372 struct omap_dss_device *dssdev, *in;
373
374 pdata = dev_get_platdata(&spi->dev);
375
376 in = omap_dss_find_output(pdata->source);
377 if (in == NULL) {
378 dev_err(&spi->dev, "failed to find video source '%s'\n",
379 pdata->source);
380 return -EPROBE_DEFER;
381 }
382
383 ddata->in = in;
384
385 ddata->data_lines = pdata->data_lines;
386
387 dssdev = &ddata->dssdev;
388 dssdev->name = pdata->name;
389
390 return 0;
391}
392
393static int td028ttec1_probe_of(struct spi_device *spi) 367static int td028ttec1_probe_of(struct spi_device *spi)
394{ 368{
395 struct device_node *node = spi->dev.of_node; 369 struct device_node *node = spi->dev.of_node;
@@ -415,6 +389,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
415 389
416 dev_dbg(&spi->dev, "%s\n", __func__); 390 dev_dbg(&spi->dev, "%s\n", __func__);
417 391
392 if (!spi->dev.of_node)
393 return -ENODEV;
394
418 spi->bits_per_word = 9; 395 spi->bits_per_word = 9;
419 spi->mode = SPI_MODE_3; 396 spi->mode = SPI_MODE_3;
420 397
@@ -432,17 +409,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
432 409
433 ddata->spi_dev = spi; 410 ddata->spi_dev = spi;
434 411
435 if (dev_get_platdata(&spi->dev)) { 412 r = td028ttec1_probe_of(spi);
436 r = td028ttec1_panel_probe_pdata(spi); 413 if (r)
437 if (r) 414 return r;
438 return r;
439 } else if (spi->dev.of_node) {
440 r = td028ttec1_probe_of(spi);
441 if (r)
442 return r;
443 } else {
444 return -ENODEV;
445 }
446 415
447 ddata->videomode = td028ttec1_panel_timings; 416 ddata->videomode = td028ttec1_panel_timings;
448 417
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 68e3b68a2920..51e628b85f4a 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -19,8 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/of_gpio.h> 20#include <linux/of_gpio.h>
21 21
22#include <video/omapdss.h> 22#include <video/omapfb_dss.h>
23#include <video/omap-panel-data.h>
24 23
25#define TPO_R02_MODE(x) ((x) & 7) 24#define TPO_R02_MODE(x) ((x) & 7)
26#define TPO_R02_MODE_800x480 7 25#define TPO_R02_MODE_800x480 7
@@ -465,32 +464,6 @@ static struct omap_dss_driver tpo_td043_ops = {
465}; 464};
466 465
467 466
468static int tpo_td043_probe_pdata(struct spi_device *spi)
469{
470 const struct panel_tpo_td043mtea1_platform_data *pdata;
471 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
472 struct omap_dss_device *dssdev, *in;
473
474 pdata = dev_get_platdata(&spi->dev);
475
476 ddata->nreset_gpio = pdata->nreset_gpio;
477
478 in = omap_dss_find_output(pdata->source);
479 if (in == NULL) {
480 dev_err(&spi->dev, "failed to find video source '%s'\n",
481 pdata->source);
482 return -EPROBE_DEFER;
483 }
484 ddata->in = in;
485
486 ddata->data_lines = pdata->data_lines;
487
488 dssdev = &ddata->dssdev;
489 dssdev->name = pdata->name;
490
491 return 0;
492}
493
494static int tpo_td043_probe_of(struct spi_device *spi) 467static int tpo_td043_probe_of(struct spi_device *spi)
495{ 468{
496 struct device_node *node = spi->dev.of_node; 469 struct device_node *node = spi->dev.of_node;
@@ -524,6 +497,9 @@ static int tpo_td043_probe(struct spi_device *spi)
524 497
525 dev_dbg(&spi->dev, "%s\n", __func__); 498 dev_dbg(&spi->dev, "%s\n", __func__);
526 499
500 if (!spi->dev.of_node)
501 return -ENODEV;
502
527 spi->bits_per_word = 16; 503 spi->bits_per_word = 16;
528 spi->mode = SPI_MODE_0; 504 spi->mode = SPI_MODE_0;
529 505
@@ -541,17 +517,9 @@ static int tpo_td043_probe(struct spi_device *spi)
 
	ddata->spi = spi;
 
-	if (dev_get_platdata(&spi->dev)) {
-		r = tpo_td043_probe_pdata(spi);
-		if (r)
-			return r;
-	} else if (spi->dev.of_node) {
-		r = tpo_td043_probe_of(spi);
-		if (r)
-			return r;
-	} else {
-		return -ENODEV;
-	}
+	r = tpo_td043_probe_of(spi);
+	if (r)
+		return r;
 
	ddata->mode = TPO_R02_MODE_800x480;
	memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
index 663ccc3bf4e5..2481f4871f66 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
@@ -23,7 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index 5a87179b7312..29de4827589d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -35,7 +35,7 @@
 #include <linux/suspend.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
@@ -208,8 +208,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
 		core.default_display_name = def_disp_name;
	else if (pdata->default_display_name)
		core.default_display_name = pdata->default_display_name;
-	else if (pdata->default_device)
-		core.default_display_name = pdata->default_device->name;
 
	register_pm_notifier(&omap_dss_pm_notif_block);
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 6607db37a5e4..3691bde4ce0a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -26,7 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index 5491e304f4fe..7a75dfda9845 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -41,7 +41,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
index 038c15b04215..59c9a5c47ca9 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
@@ -18,7 +18,7 @@
  */
 
 #include <linux/kernel.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dispc.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index 75b5286029ee..b3fdbfd0b82d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -25,7 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/sysfs.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c
index ef5b9027985d..dd5468695c43 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 #include "dss_features.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
index 7953e6a52346..da09806b940c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
@@ -34,7 +34,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index d63e59807707..9e4800a4e3d1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -42,7 +42,7 @@
 #include <linux/of_platform.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/mipi_display.h>
 
 #include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index bf407b6ba15c..d356a252ab4a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -18,7 +18,7 @@
 #include <linux/of.h>
 #include <linux/seq_file.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 0078c4d1fc31..47d7f69ad9ad 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -41,7 +41,7 @@
 #include <linux/suspend.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index 0184a8461df1..a3cc0ca8f9d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -73,6 +73,17 @@
 #define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
 
+enum omap_dss_clk_source {
+	OMAP_DSS_CLK_SRC_FCK = 0,		/* OMAP2/3: DSS1_ALWON_FCLK
+						 * OMAP4: DSS_FCLK */
+	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,	/* OMAP3: DSI1_PLL_FCLK
+						 * OMAP4: PLL1_CLK1 */
+	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,	/* OMAP3: DSI2_PLL_FCLK
+						 * OMAP4: PLL1_CLK2 */
+	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC,	/* OMAP4: PLL2_CLK1 */
+	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI,	/* OMAP4: PLL2_CLK2 */
+};
+
 enum dss_io_pad_mode {
	DSS_IO_PAD_MODE_RESET,
	DSS_IO_PAD_MODE_RFBI,
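
The enum above is being moved into the omapfb-private dss.h, with each value's per-SoC clock name recorded only in comments. A hedged illustration of keeping such an enum printable — the table and its name are invented for this example, not taken from omapfb:

/* Illustrative only: a designated-initializer table indexed by the enum
 * stays correct even if entries are later reordered or extended. */
static const char * const dss_clk_src_names[] = {
	[OMAP_DSS_CLK_SRC_FCK]			= "FCK",
	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]	= "DSI PLL HSDIV DISPC",
	[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]	= "DSI PLL HSDIV DSI",
	[OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC]	= "DSI2 PLL HSDIV DISPC",
	[OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI]	= "DSI2 PLL HSDIV DSI",
};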
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
index c886a2927f73..8fc843b56b26 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
@@ -23,7 +23,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
index 53616b02b613..f6de87e078b0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
@@ -23,7 +23,8 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
+#include <sound/omap-hdmi-audio.h>
 
 #include "dss.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 2e71aec838b1..926a6f20dbb2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -33,7 +33,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "hdmi4_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index aade6d99662a..0ee829a165c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -38,7 +38,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "hdmi5_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index 8ea531d2652c..bbfe7e2d4332 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
 {
	void __iomem *base = core->base;
	const unsigned long long iclk = 266000000;	/* DSS L3 ICLK */
-	const unsigned ss_scl_high = 4000;		/* ns */
-	const unsigned ss_scl_low = 4700;		/* ns */
+	const unsigned ss_scl_high = 4600;		/* ns */
+	const unsigned ss_scl_low = 5400;		/* ns */
	const unsigned fs_scl_high = 600;		/* ns */
	const unsigned fs_scl_low = 1300;		/* ns */
	const unsigned sda_hold = 1000;			/* ns */
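
The ss_/fs_ constants above are nanosecond SCL timing targets that the DDC init code turns into counts of the 266 MHz L3 interface clock; the hunk raises the standard-speed targets above the bare I2C minimums (4000/4700 ns). A standalone sketch of the ns-to-cycles conversion — the round-up strategy is an assumption, not necessarily the driver's exact divider math:

#include <stdio.h>

/* Convert a nanosecond target into whole cycles of a clock, rounding
 * up so the resulting period is never shorter than requested. */
static unsigned long ns_to_cycles(unsigned long long clk_hz, unsigned ns)
{
	return (unsigned long)((clk_hz * ns + 999999999ULL) / 1000000000ULL);
}

int main(void)
{
	const unsigned long long iclk = 266000000ULL;	/* DSS L3 ICLK */

	printf("ss_scl_high: %lu cycles\n", ns_to_cycles(iclk, 4600));
	printf("ss_scl_low:  %lu cycles\n", ns_to_cycles(iclk, 5400));
	return 0;
}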
@@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
 
	c = (ptr[1] >> 6) & 0x3;
	m = (ptr[1] >> 4) & 0x3;
-	r = (ptr[1] >> 0) & 0x3;
+	r = (ptr[1] >> 0) & 0xf;
 
	itc = (ptr[2] >> 7) & 0x1;
	ec = (ptr[2] >> 4) & 0x7;
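
The mask change above is a genuine bitfield fix: in AVI InfoFrame data byte 2 (CEA-861), C and M are 2-bit fields but R, the active format aspect ratio, occupies the low four bits, so masking with 0x3 silently discarded its top half. A standalone illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t ptr1 = 0xab;	/* example byte: C=2, M=2, R=0xb */

	unsigned c = (ptr1 >> 6) & 0x3;		/* colorimetry, 2 bits */
	unsigned m = (ptr1 >> 4) & 0x3;		/* picture aspect, 2 bits */
	unsigned r_old = (ptr1 >> 0) & 0x3;	/* old mask: loses bits 3:2 */
	unsigned r_new = (ptr1 >> 0) & 0xf;	/* fixed mask: full field */

	printf("c=%u m=%u r_old=%#x r_new=%#x\n", c, m, r_old, r_new);
	return 0;
}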
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
index 1b8fcc6c4ba1..189a5ad125a3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "hdmi.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
index 1f5d19c119ce..9a13c35fd6d8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
@@ -13,7 +13,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
index 06e23a7c432c..eac3665aba6c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
index 7c544bc56fb5..705373e4cf38 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
@@ -14,7 +14,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index a7414fb12830..9e2a67fdf4d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -26,7 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager.c b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
index 08a67f4f6a20..69f86d2cc274 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
@@ -69,7 +69,6 @@ int dss_init_overlay_managers(void)
 			break;
		}
 
-		mgr->caps = 0;
		mgr->supported_displays =
			dss_feat_get_supported_displays(mgr->id);
		mgr->supported_outputs =
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/output.c b/drivers/video/fbdev/omap2/omapfb/dss/output.c
index 16072159bd24..bed9a978269d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/output.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/output.c
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 4cc5ddebfb34..f1f6c0aea752 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -26,7 +26,7 @@
 #include <linux/kobject.h>
 #include <linux/platform_device.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
index 2f7cee985cdd..d6c5d75d2ef8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
@@ -30,7 +30,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/pll.c b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
index f974ddcd3b6e..0564c5606cd0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
@@ -22,7 +22,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
index aea6a1d0fb20..562b0c4ae0c6 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
@@ -38,7 +38,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 struct rfbi_reg { u16 idx; };
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
index d747cc6b59e1..c4be732a4714 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
@@ -29,7 +29,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 static struct {
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 26e0ee30adf8..392464da12e4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -37,7 +37,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
index b1ec59e42940..a890540f2037 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 9ddfdd63b84c..ef69273074ba 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -30,7 +30,7 @@
 #include <linux/export.h>
 #include <linux/sizes.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index d3af01c94a58..2fb90cb6803f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -30,7 +30,7 @@
 #include <linux/platform_device.h>
 #include <linux/omapfb.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 18fa9e1d0033..8087a009c54f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -29,7 +29,7 @@
 #include <linux/mm.h>
 #include <linux/omapfb.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h
index 623cd872a367..bcb9ff4a607d 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb.h
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h
@@ -31,7 +31,7 @@
 #include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #ifdef DEBUG
 extern bool omapfb_debug;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a400951e8678..689d25ac6a68 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2042,6 +2042,11 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 	struct btrfs_bio *bbio = NULL;
 
 
+	/*
+	 * Avoid races with device replace and make sure our bbio has devices
+	 * associated to its stripes that don't go away while we are discarding.
+	 */
+	btrfs_bio_counter_inc_blocked(root->fs_info);
	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
@@ -2074,6 +2079,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 		}
		btrfs_put_bbio(bbio);
	}
+	btrfs_bio_counter_dec(root->fs_info);
 
	if (actual_bytes)
		*actual_bytes = discarded_bytes;
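
The new comment states the invariant this series establishes: the bbio's stripe device pointers are only guaranteed to stay alive while the fs_info bio counter is held, which blocks the final phase of a device replace. A hedged sketch of the calling pattern, with error handling trimmed to the essentials:

/* Sketch only: the inc_blocked/dec pair must bracket the whole window
 * in which bbio->stripes[i].dev pointers are dereferenced. */
static int map_and_use_bbio(struct btrfs_fs_info *fs_info, int rw,
			    u64 logical, u64 *len, struct btrfs_bio **bbio)
{
	int ret;

	btrfs_bio_counter_inc_blocked(fs_info);	/* holds off dev-replace */
	ret = btrfs_map_block(fs_info, rw, logical, len, bbio, 0);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);	/* every exit must dec */
		return ret;
	}
	/* ... use (*bbio)->stripes[i].dev safely here ... */
	btrfs_put_bbio(*bbio);
	btrfs_bio_counter_dec(fs_info);
	return 0;
}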
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 3cd57825c75f..6e953de83f08 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2025,9 +2025,16 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 	bio->bi_iter.bi_size = 0;
	map_length = length;
 
+	/*
+	 * Avoid races with device replace and make sure our bbio has devices
+	 * associated to its stripes that don't go away while we are doing the
+	 * read repair operation.
+	 */
+	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_block(fs_info, WRITE, logical,
			      &map_length, &bbio, mirror_num);
	if (ret) {
+		btrfs_bio_counter_dec(fs_info);
		bio_put(bio);
		return -EIO;
	}
@@ -2037,6 +2044,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 	dev = bbio->stripes[mirror_num-1].dev;
	btrfs_put_bbio(bbio);
	if (!dev || !dev->bdev || !dev->writeable) {
+		btrfs_bio_counter_dec(fs_info);
		bio_put(bio);
		return -EIO;
	}
@@ -2045,6 +2053,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 
	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
		/* try to remap that extent elsewhere? */
+		btrfs_bio_counter_dec(fs_info);
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
@@ -2054,6 +2063,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
				  btrfs_ino(inode), start,
				  rcu_str_deref(dev->name), sector);
+	btrfs_bio_counter_dec(fs_info);
	bio_put(bio);
	return 0;
 }
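
Because repair_io_failure() has four exits after the counter is taken, each one now needs its own btrfs_bio_counter_dec(). A hedged sketch of the same pairing written with a single exit label, a shape kernel code often prefers so a future error path cannot forget the decrement:

/* Sketch only; not the function above, just its cleanup discipline. */
static int repair_sketch(struct btrfs_fs_info *fs_info, struct bio *bio,
			 u64 logical, u64 *map_length,
			 struct btrfs_bio **bbio, int mirror_num)
{
	int ret;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_block(fs_info, WRITE, logical, map_length,
			      bbio, mirror_num);
	if (ret)
		goto out;
	/* ... pick the target stripe, submit the bio, wait for it ... */
out:
	btrfs_bio_counter_dec(fs_info);
	if (ret)
		bio_put(bio);
	return ret;
}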
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 270499598ed4..8b1212e8f7a8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6979,7 +6979,18 @@ insert:
 		 * existing will always be non-NULL, since there must be
		 * extent causing the -EEXIST.
		 */
-		if (start >= extent_map_end(existing) ||
+		if (existing->start == em->start &&
+		    extent_map_end(existing) == extent_map_end(em) &&
+		    em->block_start == existing->block_start) {
+			/*
+			 * these two extents are the same, it happens
+			 * with inlines especially
+			 */
+			free_extent_map(em);
+			em = existing;
+			err = 0;
+
+		} else if (start >= extent_map_end(existing) ||
		    start <= existing->start) {
			/*
			 * The existing extent map is the one nearest to
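
The new first branch recognizes a benign -EEXIST: the map being inserted and the one already in the tree describe the same extent, which happens especially with inline extents. A hedged sketch of that sameness test pulled into a helper (the helper name is invented for illustration):

/* Illustrative: two extent maps alias each other when they span the
 * identical file range and start at the same disk block. */
static bool extent_maps_alias(const struct extent_map *a,
			      const struct extent_map *b)
{
	return a->start == b->start &&
	       extent_map_end(a) == extent_map_end(b) &&
	       a->block_start == b->block_start;
}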
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 559170464d7c..e96634a725c3 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -718,12 +718,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
 	return count;
 }
 
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
			      const u64 range_start, const u64 range_len)
 {
	struct btrfs_root *root;
	struct list_head splice;
	int done;
+	int total_done = 0;
 
	INIT_LIST_HEAD(&splice);
 
@@ -742,6 +743,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
 		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
+		total_done += done;
 
		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
@@ -752,6 +754,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
 	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
+
+	return total_done;
 }
 
 /*
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 2049c9be85ee..451507776ff5 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -199,7 +199,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 			   u32 *sum, int len);
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
			       const u64 range_start, const u64 range_len);
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
			      const u64 range_start, const u64 range_len);
 void btrfs_get_logged_extents(struct inode *inode,
			      struct list_head *logged_list,
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 298631eaee78..8428db7cd88f 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -761,12 +761,14 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
 
	do {
		enqueued = 0;
+		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
+		mutex_unlock(&fs_devices->device_list_mutex);
		total += enqueued;
	} while (enqueued && total < 10000);
 
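
The fix applies the standard rule that a list walked with list_for_each_entry() must not change under the iterator; fs_devices->devices can be rewritten by device add, remove, or replace, so the walk now holds device_list_mutex. A hedged sketch of the pattern:

/* Sketch: hold device_list_mutex for the whole traversal; the result
 * is only a snapshot once the mutex is dropped. */
static int count_busy_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	int busy = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (atomic_read(&device->reada_in_flight) > 0)
			busy++;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
	return busy;
}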
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 46d847f66e4b..70427ef66b04 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3582,6 +3582,46 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 	 */
	scrub_pause_on(fs_info);
	ret = btrfs_inc_block_group_ro(root, cache);
+	if (!ret && is_dev_replace) {
+		/*
+		 * If we are doing a device replace wait for any tasks
+		 * that started dellaloc right before we set the block
+		 * group to RO mode, as they might have just allocated
+		 * an extent from it or decided they could do a nocow
+		 * write. And if any such tasks did that, wait for their
+		 * ordered extents to complete and then commit the
+		 * current transaction, so that we can later see the new
+		 * extent items in the extent tree - the ordered extents
+		 * create delayed data references (for cow writes) when
+		 * they complete, which will be run and insert the
+		 * corresponding extent items into the extent tree when
+		 * we commit the transaction they used when running
+		 * inode.c:btrfs_finish_ordered_io(). We later use
+		 * the commit root of the extent tree to find extents
+		 * to copy from the srcdev into the tgtdev, and we don't
+		 * want to miss any new extents.
+		 */
+		btrfs_wait_block_group_reservations(cache);
+		btrfs_wait_nocow_writers(cache);
+		ret = btrfs_wait_ordered_roots(fs_info, -1,
+					       cache->key.objectid,
+					       cache->key.offset);
+		if (ret > 0) {
+			struct btrfs_trans_handle *trans;
+
+			trans = btrfs_join_transaction(root);
+			if (IS_ERR(trans))
+				ret = PTR_ERR(trans);
+			else
+				ret = btrfs_commit_transaction(trans,
+							       root);
+			if (ret) {
+				scrub_pause_off(fs_info);
+				btrfs_put_block_group(cache);
+				break;
+			}
+		}
+	}
	scrub_pause_off(fs_info);
 
	if (ret == 0) {
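
Condensed, the block above enforces an ordering for device replace: freeze new allocations against the now read-only block group, wait for in-flight ordered extents, and if any completed, commit so their extent items become visible in the extent tree's commit root before the copy starts. A hedged sketch of that control flow (names match the hunk; error handling abbreviated):

static int settle_block_group(struct btrfs_fs_info *fs_info,
			      struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache)
{
	int ret;

	btrfs_wait_block_group_reservations(cache);
	btrfs_wait_nocow_writers(cache);
	ret = btrfs_wait_ordered_roots(fs_info, -1, cache->key.objectid,
				       cache->key.offset);
	if (ret > 0) {	/* some ordered extents really completed */
		struct btrfs_trans_handle *trans;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		ret = btrfs_commit_transaction(trans, root);
	}
	return ret < 0 ? ret : 0;
}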
@@ -3602,9 +3642,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 			break;
		}
 
+		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
+		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache, is_dev_replace);
 
@@ -3640,6 +3682,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
	scrub_pause_off(fs_info);
 
+	btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
+	dev_replace->cursor_left = dev_replace->cursor_right;
+	dev_replace->item_needs_writeback = 1;
+	btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
+
	if (ro_set)
		btrfs_dec_block_group_ro(root, cache);
 
@@ -3677,9 +3724,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 			ret = -ENOMEM;
			break;
		}
-
-		dev_replace->cursor_left = dev_replace->cursor_right;
-		dev_replace->item_needs_writeback = 1;
 skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index bdc62561ede8..da9e0036a864 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2761,6 +2761,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
 	u64 dev_extent_len = 0;
	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	int i, ret = 0;
+	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 
	/* Just in case */
	root = root->fs_info->chunk_root;
@@ -2787,12 +2788,19 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
 	check_system_chunk(trans, extent_root, map->type);
	unlock_chunks(root->fs_info->chunk_root);
 
+	/*
+	 * Take the device list mutex to prevent races with the final phase of
+	 * a device replace operation that replaces the device object associated
+	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
+	 */
+	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
+			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
@@ -2811,11 +2819,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
 		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			if (ret) {
+				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}
		}
	}
+	mutex_unlock(&fs_devices->device_list_mutex);
+
	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
@@ -5762,20 +5773,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		}
	}
	if (found) {
-		if (physical_of_found + map->stripe_len <=
-		    dev_replace->cursor_left) {
-			struct btrfs_bio_stripe *tgtdev_stripe =
-				bbio->stripes + num_stripes;
+		struct btrfs_bio_stripe *tgtdev_stripe =
+			bbio->stripes + num_stripes;
 
			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;
 
			tgtdev_indexes++;
			num_stripes++;
-		}
	}
 }
 
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 861d611b8c05..ce5f345d70f5 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -380,7 +380,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache)
  * check if the backing cache is updated to FS-Cache
  * - called by FS-Cache when evaluates if need to invalidate the cache
  */
-static bool cachefiles_check_consistency(struct fscache_operation *op)
+static int cachefiles_check_consistency(struct fscache_operation *op)
 {
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
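
The bool-to-int change matters because fscache invokes this through a function pointer slot; calling a function through a pointer of a mismatched return type is undefined behaviour even where bool and int happen to share an ABI. A hedged sketch of the shape the callback must match (the struct here is illustrative, not fscache's real ops table):

struct fscache_ops_sketch {
	/* the slot's declared type; the callback must match it exactly */
	int (*check_consistency)(struct fscache_operation *op);
};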
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index eeb71e5de27a..26a9d10d75e9 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -276,8 +276,10 @@ static void finish_read(struct ceph_osd_request *req)
 	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];
 
-		if (rc < 0 && rc != -ENOENT)
+		if (rc < 0 && rc != -ENOENT) {
+			ceph_fscache_readpage_cancel(inode, page);
			goto unlock;
+		}
		if (bytes < (int)PAGE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
@@ -535,8 +537,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
 
-	ceph_readpage_to_fscache(inode, page);
-
	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index c052b5bf219b..238c55b01723 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -25,6 +25,7 @@
25#include "cache.h" 25#include "cache.h"
26 26
27struct ceph_aux_inode { 27struct ceph_aux_inode {
28 u64 version;
28 struct timespec mtime; 29 struct timespec mtime;
29 loff_t size; 30 loff_t size;
30}; 31};
@@ -69,15 +70,8 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
69 fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index, 70 fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
70 &ceph_fscache_fsid_object_def, 71 &ceph_fscache_fsid_object_def,
71 fsc, true); 72 fsc, true);
72 73 if (!fsc->fscache)
73 if (fsc->fscache == NULL) {
74 pr_err("Unable to resgister fsid: %p fscache cookie", fsc); 74 pr_err("Unable to resgister fsid: %p fscache cookie", fsc);
75 return 0;
76 }
77
78 fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1);
79 if (fsc->revalidate_wq == NULL)
80 return -ENOMEM;
81 75
82 return 0; 76 return 0;
83} 77}
@@ -105,6 +99,7 @@ static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data,
105 const struct inode* inode = &ci->vfs_inode; 99 const struct inode* inode = &ci->vfs_inode;
106 100
107 memset(&aux, 0, sizeof(aux)); 101 memset(&aux, 0, sizeof(aux));
102 aux.version = ci->i_version;
108 aux.mtime = inode->i_mtime; 103 aux.mtime = inode->i_mtime;
109 aux.size = i_size_read(inode); 104 aux.size = i_size_read(inode);
110 105
@@ -131,6 +126,7 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux(
131 return FSCACHE_CHECKAUX_OBSOLETE; 126 return FSCACHE_CHECKAUX_OBSOLETE;
132 127
133 memset(&aux, 0, sizeof(aux)); 128 memset(&aux, 0, sizeof(aux));
129 aux.version = ci->i_version;
134 aux.mtime = inode->i_mtime; 130 aux.mtime = inode->i_mtime;
135 aux.size = i_size_read(inode); 131 aux.size = i_size_read(inode);
136 132
@@ -181,32 +177,26 @@ static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
181 .now_uncached = ceph_fscache_inode_now_uncached, 177 .now_uncached = ceph_fscache_inode_now_uncached,
182}; 178};
183 179
184void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc, 180void ceph_fscache_register_inode_cookie(struct inode *inode)
185 struct ceph_inode_info* ci)
186{ 181{
187 struct inode* inode = &ci->vfs_inode; 182 struct ceph_inode_info *ci = ceph_inode(inode);
183 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
188 184
189 /* No caching for filesystem */ 185 /* No caching for filesystem */
190 if (fsc->fscache == NULL) 186 if (fsc->fscache == NULL)
191 return; 187 return;
192 188
193 /* Only cache for regular files that are read only */ 189 /* Only cache for regular files that are read only */
194 if ((ci->vfs_inode.i_mode & S_IFREG) == 0) 190 if (!S_ISREG(inode->i_mode))
195 return; 191 return;
196 192
197 /* Avoid multiple racing open requests */ 193 inode_lock_nested(inode, I_MUTEX_CHILD);
198 inode_lock(inode); 194 if (!ci->fscache) {
199 195 ci->fscache = fscache_acquire_cookie(fsc->fscache,
200 if (ci->fscache) 196 &ceph_fscache_inode_object_def,
201 goto done; 197 ci, false);
202 198 }
203 ci->fscache = fscache_acquire_cookie(fsc->fscache,
204 &ceph_fscache_inode_object_def,
205 ci, true);
206 fscache_check_consistency(ci->fscache);
207done:
208 inode_unlock(inode); 199 inode_unlock(inode);
209
210} 200}
211 201
212void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) 202void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
@@ -222,6 +212,34 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
222 fscache_relinquish_cookie(cookie, 0); 212 fscache_relinquish_cookie(cookie, 0);
223} 213}
224 214
215static bool ceph_fscache_can_enable(void *data)
216{
217 struct inode *inode = data;
218 return !inode_is_open_for_write(inode);
219}
220
221void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
222{
223 struct ceph_inode_info *ci = ceph_inode(inode);
224
225 if (!fscache_cookie_valid(ci->fscache))
226 return;
227
228 if (inode_is_open_for_write(inode)) {
229 dout("fscache_file_set_cookie %p %p disabling cache\n",
230 inode, filp);
231 fscache_disable_cookie(ci->fscache, false);
232 fscache_uncache_all_inode_pages(ci->fscache, inode);
233 } else {
234 fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable,
235 inode);
236 if (fscache_cookie_enabled(ci->fscache)) {
237 dout("fscache_file_set_cookie %p %p enabing cache\n",
238 inode, filp);
239 }
240 }
241}
242
225static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) 243static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
226{ 244{
227 if (!error) 245 if (!error)
@@ -238,8 +256,7 @@ static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int
238 256
239static inline bool cache_valid(struct ceph_inode_info *ci) 257static inline bool cache_valid(struct ceph_inode_info *ci)
240{ 258{
241 return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) && 259 return ci->i_fscache_gen == ci->i_rdcache_gen;
242 (ci->i_fscache_gen == ci->i_rdcache_gen));
243} 260}
244 261
245 262
@@ -332,69 +349,27 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
332 349
333void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) 350void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
334{ 351{
335 if (fsc->revalidate_wq)
336 destroy_workqueue(fsc->revalidate_wq);
337
338 fscache_relinquish_cookie(fsc->fscache, 0); 352 fscache_relinquish_cookie(fsc->fscache, 0);
339 fsc->fscache = NULL; 353 fsc->fscache = NULL;
340} 354}
341 355
342static void ceph_revalidate_work(struct work_struct *work) 356/*
343{ 357 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
344 int issued; 358 */
345 u32 orig_gen; 359void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
346 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
347 i_revalidate_work);
348 struct inode *inode = &ci->vfs_inode;
349
350 spin_lock(&ci->i_ceph_lock);
351 issued = __ceph_caps_issued(ci, NULL);
352 orig_gen = ci->i_rdcache_gen;
353 spin_unlock(&ci->i_ceph_lock);
354
355 if (!(issued & CEPH_CAP_FILE_CACHE)) {
356 dout("revalidate_work lost cache before validation %p\n",
357 inode);
358 goto out;
359 }
360
361 if (!fscache_check_consistency(ci->fscache))
362 fscache_invalidate(ci->fscache);
363
364 spin_lock(&ci->i_ceph_lock);
365 /* Update the new valid generation (backwards sanity check too) */
366 if (orig_gen > ci->i_fscache_gen) {
367 ci->i_fscache_gen = orig_gen;
368 }
369 spin_unlock(&ci->i_ceph_lock);
370
371out:
372 iput(&ci->vfs_inode);
373}
374
375void ceph_queue_revalidate(struct inode *inode)
376{ 360{
377 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 361 if (cache_valid(ci))
378 struct ceph_inode_info *ci = ceph_inode(inode);
379
380 if (fsc->revalidate_wq == NULL || ci->fscache == NULL)
381 return; 362 return;
382 363
383 ihold(inode); 364 /* resue i_truncate_mutex. There should be no pending
384 365 * truncate while the caller holds CEPH_CAP_FILE_RD */
385 if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq, 366 mutex_lock(&ci->i_truncate_mutex);
386 &ci->i_revalidate_work)) { 367 if (!cache_valid(ci)) {
387 dout("ceph_queue_revalidate %p\n", inode); 368 if (fscache_check_consistency(ci->fscache))
388 } else { 369 fscache_invalidate(ci->fscache);
389 dout("ceph_queue_revalidate %p failed\n)", inode); 370 spin_lock(&ci->i_ceph_lock);
390 iput(inode); 371 ci->i_fscache_gen = ci->i_rdcache_gen;
372 spin_unlock(&ci->i_ceph_lock);
391 } 373 }
392} 374 mutex_unlock(&ci->i_truncate_mutex);
393
394void ceph_fscache_inode_init(struct ceph_inode_info *ci)
395{
396 ci->fscache = NULL;
397 /* The first load is verifed cookie open time */
398 ci->i_fscache_gen = 1;
399 INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work);
400} 375}
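
Taken together, these cache.c changes replace the workqueue-driven revalidation scheme with a simpler model: one cookie per inode for its whole lifetime, enabled or disabled per open depending on writers, with staleness caught by the enlarged aux data (version, mtime, size). A hedged sketch of the consistency rule that aux data now encodes:

/* Sketch: a cached object is obsolete unless all three fields still
 * match; adding i_version catches changes mtime alone would miss. */
static enum fscache_checkaux aux_check_sketch(const struct ceph_aux_inode *stored,
					      const struct ceph_aux_inode *live)
{
	if (stored->version != live->version ||
	    memcmp(&stored->mtime, &live->mtime, sizeof(live->mtime)) ||
	    stored->size != live->size)
		return FSCACHE_CHECKAUX_OBSOLETE;
	return FSCACHE_CHECKAUX_OKAY;
}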
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index 5ac591bd012b..7e72c7594f0c 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -34,10 +34,10 @@ void ceph_fscache_unregister(void);
 int ceph_fscache_register_fs(struct ceph_fs_client* fsc);
 void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);
 
-void ceph_fscache_inode_init(struct ceph_inode_info *ci);
-void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
-					struct ceph_inode_info* ci);
+void ceph_fscache_register_inode_cookie(struct inode *inode);
 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
+void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
+void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);
 
 int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
 int ceph_readpages_from_fscache(struct inode *inode,
@@ -46,12 +46,11 @@ int ceph_readpages_from_fscache(struct inode *inode,
 				  unsigned *nr_pages);
 void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
 void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
-void ceph_queue_revalidate(struct inode *inode);
 
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
+static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
 {
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	fscache_attr_changed(ci->fscache);
+	ci->fscache = NULL;
+	ci->i_fscache_gen = 0;
 }
 
 static inline void ceph_fscache_invalidate(struct inode *inode)
@@ -88,6 +87,11 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode,
 	return fscache_readpages_cancel(ci->fscache, pages);
 }
 
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
+{
+	ci->i_fscache_gen = ci->i_rdcache_gen - 1;
+}
+
 #else
 
 static inline int ceph_fscache_register(void)
@@ -112,8 +116,20 @@ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
 {
 }
 
-static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc,
-						      struct ceph_inode_info* ci)
+static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
+{
+}
+
+static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+{
+}
+
+static inline void ceph_fscache_file_set_cookie(struct inode *inode,
+						struct file *filp)
+{
+}
+
+static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
 {
 }
 
@@ -141,10 +157,6 @@ static inline void ceph_readpage_to_fscache(struct inode *inode,
 {
 }
 
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
-{
-}
-
 static inline void ceph_fscache_invalidate(struct inode *inode)
 {
 }
@@ -154,10 +166,6 @@ static inline void ceph_invalidate_fscache_page(struct inode *inode,
 {
 }
 
-static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
-{
-}
-
 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
 {
 	return 1;
@@ -173,7 +181,7 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode,
 {
 }
 
-static inline void ceph_queue_revalidate(struct inode *inode)
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
 {
 }
 
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index c17b5d76d75e..6f60d0a3d0f9 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2393,6 +2393,9 @@ again:
 			snap_rwsem_locked = true;
		}
		*got = need | (have & want);
+		if ((need & CEPH_CAP_FILE_RD) &&
+		    !(*got & CEPH_CAP_FILE_CACHE))
+			ceph_disable_fscache_readpage(ci);
		__take_cap_refs(ci, *got, true);
		ret = 1;
	}
@@ -2554,6 +2557,9 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
 			break;
		}
 
+	if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
+		ceph_fscache_revalidate_cookie(ci);
+
	*got = _got;
	return 0;
 }
@@ -2795,7 +2801,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
 	bool writeback = false;
	bool queue_trunc = false;
	bool queue_invalidate = false;
-	bool queue_revalidate = false;
	bool deleted_inode = false;
	bool fill_inline = false;
 
@@ -2837,8 +2842,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			}
		}
-
-		ceph_fscache_invalidate(inode);
	}
 
	/* side effects now are allowed */
@@ -2880,11 +2883,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
 		}
	}
 
-	/* Do we need to revalidate our fscache cookie. Don't bother on the
-	 * first cache cap as we already validate at cookie creation time. */
-	if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
-		queue_revalidate = true;
-
	if (newcaps & CEPH_CAP_ANY_RD) {
		/* ctime/mtime/atime? */
		ceph_decode_timespec(&mtime, &grant->mtime);
@@ -2993,11 +2991,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
 	if (fill_inline)
		ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
 
-	if (queue_trunc) {
+	if (queue_trunc)
		ceph_queue_vmtruncate(inode);
-		ceph_queue_revalidate(inode);
-	} else if (queue_revalidate)
-		ceph_queue_revalidate(inode);
 
	if (writeback)
		/*
@@ -3199,10 +3194,8 @@ static void handle_cap_trunc(struct inode *inode,
 		     truncate_seq, truncate_size, size);
	spin_unlock(&ci->i_ceph_lock);
 
-	if (queue_trunc) {
+	if (queue_trunc)
		ceph_queue_vmtruncate(inode);
-		ceph_fscache_invalidate(inode);
-	}
 }
 
 /*
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index a888df6f2d71..ce2f5795e44b 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -137,23 +137,11 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
137{ 137{
138 struct ceph_file_info *cf; 138 struct ceph_file_info *cf;
139 int ret = 0; 139 int ret = 0;
140 struct ceph_inode_info *ci = ceph_inode(inode);
141 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
142 struct ceph_mds_client *mdsc = fsc->mdsc;
143 140
144 switch (inode->i_mode & S_IFMT) { 141 switch (inode->i_mode & S_IFMT) {
145 case S_IFREG: 142 case S_IFREG:
146 /* First file open request creates the cookie, we want to keep 143 ceph_fscache_register_inode_cookie(inode);
147 * this cookie around for the filetime of the inode as not to 144 ceph_fscache_file_set_cookie(inode, file);
148 * have to worry about fscache register / revoke / operation
149 * races.
150 *
151 * Also, if we know the operation is going to invalidate data
152 * (non readonly) just nuke the cache right away.
153 */
154 ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
155 if ((fmode & CEPH_FILE_MODE_WR))
156 ceph_fscache_invalidate(inode);
157 case S_IFDIR: 145 case S_IFDIR:
158 dout("init_file %p %p 0%o (regular)\n", inode, file, 146 dout("init_file %p %p 0%o (regular)\n", inode, file,
159 inode->i_mode); 147 inode->i_mode);
@@ -1349,7 +1337,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1349 } 1337 }
1350 1338
1351retry_snap: 1339retry_snap:
1352 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { 1340 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
1353 err = -ENOSPC; 1341 err = -ENOSPC;
1354 goto out; 1342 goto out;
1355 } 1343 }
@@ -1407,7 +1395,6 @@ retry_snap:
1407 iov_iter_advance(from, written); 1395 iov_iter_advance(from, written);
1408 ceph_put_snap_context(snapc); 1396 ceph_put_snap_context(snapc);
1409 } else { 1397 } else {
1410 loff_t old_size = i_size_read(inode);
1411 /* 1398 /*
1412 * No need to acquire the i_truncate_mutex. Because 1399 * No need to acquire the i_truncate_mutex. Because
1413 * the MDS revokes Fwb caps before sending truncate 1400 * the MDS revokes Fwb caps before sending truncate
@@ -1418,8 +1405,6 @@ retry_snap:
1418 written = generic_perform_write(file, from, pos); 1405 written = generic_perform_write(file, from, pos);
1419 if (likely(written >= 0)) 1406 if (likely(written >= 0))
1420 iocb->ki_pos = pos + written; 1407 iocb->ki_pos = pos + written;
1421 if (i_size_read(inode) > old_size)
1422 ceph_fscache_update_objectsize(inode);
1423 inode_unlock(inode); 1408 inode_unlock(inode);
1424 } 1409 }
1425 1410
@@ -1440,7 +1425,7 @@ retry_snap:
1440 ceph_put_cap_refs(ci, got); 1425 ceph_put_cap_refs(ci, got);
1441 1426
1442 if (written >= 0) { 1427 if (written >= 0) {
1443 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL)) 1428 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
1444 iocb->ki_flags |= IOCB_DSYNC; 1429 iocb->ki_flags |= IOCB_DSYNC;
1445 1430
1446 written = generic_write_sync(iocb, written); 1431 written = generic_write_sync(iocb, written);
@@ -1672,8 +1657,8 @@ static long ceph_fallocate(struct file *file, int mode,
1672 goto unlock; 1657 goto unlock;
1673 } 1658 }
1674 1659
1675 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) && 1660 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
1676 !(mode & FALLOC_FL_PUNCH_HOLE)) { 1661 !(mode & FALLOC_FL_PUNCH_HOLE)) {
1677 ret = -ENOSPC; 1662 ret = -ENOSPC;
1678 goto unlock; 1663 goto unlock;
1679 } 1664 }
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 0130a8592191..0168b49fb6ad 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -103,7 +103,6 @@ struct ceph_fs_client {
103 103
104#ifdef CONFIG_CEPH_FSCACHE 104#ifdef CONFIG_CEPH_FSCACHE
105 struct fscache_cookie *fscache; 105 struct fscache_cookie *fscache;
106 struct workqueue_struct *revalidate_wq;
107#endif 106#endif
108}; 107};
109 108
@@ -360,8 +359,7 @@ struct ceph_inode_info {
360 359
361#ifdef CONFIG_CEPH_FSCACHE 360#ifdef CONFIG_CEPH_FSCACHE
362 struct fscache_cookie *fscache; 361 struct fscache_cookie *fscache;
363 u32 i_fscache_gen; /* sequence, for delayed fscache validate */ 362 u32 i_fscache_gen;
364 struct work_struct i_revalidate_work;
365#endif 363#endif
366 struct inode vfs_inode; /* at end */ 364 struct inode vfs_inode; /* at end */
367}; 365};
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 0b2954d7172d..37c134a132c7 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -95,8 +95,6 @@ static struct ctl_table pty_root_table[] = {
95 95
96static DEFINE_MUTEX(allocated_ptys_lock); 96static DEFINE_MUTEX(allocated_ptys_lock);
97 97
98static struct vfsmount *devpts_mnt;
99
100struct pts_mount_opts { 98struct pts_mount_opts {
101 int setuid; 99 int setuid;
102 int setgid; 100 int setgid;
@@ -104,7 +102,7 @@ struct pts_mount_opts {
104 kgid_t gid; 102 kgid_t gid;
105 umode_t mode; 103 umode_t mode;
106 umode_t ptmxmode; 104 umode_t ptmxmode;
107 int newinstance; 105 int reserve;
108 int max; 106 int max;
109}; 107};
110 108
@@ -117,11 +115,9 @@ static const match_table_t tokens = {
117 {Opt_uid, "uid=%u"}, 115 {Opt_uid, "uid=%u"},
118 {Opt_gid, "gid=%u"}, 116 {Opt_gid, "gid=%u"},
119 {Opt_mode, "mode=%o"}, 117 {Opt_mode, "mode=%o"},
120#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
121 {Opt_ptmxmode, "ptmxmode=%o"}, 118 {Opt_ptmxmode, "ptmxmode=%o"},
122 {Opt_newinstance, "newinstance"}, 119 {Opt_newinstance, "newinstance"},
123 {Opt_max, "max=%d"}, 120 {Opt_max, "max=%d"},
124#endif
125 {Opt_err, NULL} 121 {Opt_err, NULL}
126}; 122};
127 123
@@ -137,15 +133,48 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
137 return sb->s_fs_info; 133 return sb->s_fs_info;
138} 134}
139 135
140static inline struct super_block *pts_sb_from_inode(struct inode *inode) 136struct pts_fs_info *devpts_acquire(struct file *filp)
141{ 137{
142#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES 138 struct pts_fs_info *result;
143 if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) 139 struct path path;
144 return inode->i_sb; 140 struct super_block *sb;
145#endif 141 int err;
146 if (!devpts_mnt) 142
147 return NULL; 143 path = filp->f_path;
148 return devpts_mnt->mnt_sb; 144 path_get(&path);
145
146 /* Has the devpts filesystem already been found? */
147 sb = path.mnt->mnt_sb;
148 if (sb->s_magic != DEVPTS_SUPER_MAGIC) {
149 /* Is a devpts filesystem at "pts" in the same directory? */
150 err = path_pts(&path);
151 if (err) {
152 result = ERR_PTR(err);
153 goto out;
154 }
155
156 /* Is the path the root of a devpts filesystem? */
157 result = ERR_PTR(-ENODEV);
158 sb = path.mnt->mnt_sb;
159 if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
160 (path.mnt->mnt_root != sb->s_root))
161 goto out;
162 }
163
164 /*
165 * pty code needs to hold extra references in case of last /dev/tty close
166 */
167 atomic_inc(&sb->s_active);
168 result = DEVPTS_SB(sb);
169
170out:
171 path_put(&path);
172 return result;
173}
174
175void devpts_release(struct pts_fs_info *fsi)
176{
177 deactivate_super(fsi->sb);
149} 178}
150 179
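A minimal sketch of how pty code is expected to use the new pair, pinning the devpts instance behind an open file (example_ptmx_open and its elided body are illustrative, not the actual pty driver code):

	static int example_ptmx_open(struct inode *inode, struct file *filp)
	{
		struct pts_fs_info *fsi;

		/* takes an s_active reference so a concurrent umount cannot
		 * tear the superblock down under us */
		fsi = devpts_acquire(filp);
		if (IS_ERR(fsi))
			return PTR_ERR(fsi);

		/* allocate an index, create the slave node, and so on */

		devpts_release(fsi);	/* drop the reference when done */
		return 0;
	}
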
151#define PARSE_MOUNT 0 180#define PARSE_MOUNT 0
@@ -154,9 +183,7 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode)
154/* 183/*
155 * parse_mount_options(): 184 * parse_mount_options():
156 * Set @opts to mount options specified in @data. If an option is not 185 * Set @opts to mount options specified in @data. If an option is not
157 * specified in @data, set it to its default value. The exception is 186 * specified in @data, set it to its default value.
158 * 'newinstance' option which can only be set/cleared on a mount (i.e.
159 * cannot be changed during remount).
160 * 187 *
161 * Note: @data may be NULL (in which case all options are set to default). 188 * Note: @data may be NULL (in which case all options are set to default).
162 */ 189 */
@@ -174,9 +201,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
174 opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; 201 opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
175 opts->max = NR_UNIX98_PTY_MAX; 202 opts->max = NR_UNIX98_PTY_MAX;
176 203
177 /* newinstance makes sense only on initial mount */ 204 /* Only allow instances mounted from the initial mount
205 * namespace to tap the reserve pool of ptys.
206 */
178 if (op == PARSE_MOUNT) 207 if (op == PARSE_MOUNT)
179 opts->newinstance = 0; 208 opts->reserve =
209 (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns);
180 210
181 while ((p = strsep(&data, ",")) != NULL) { 211 while ((p = strsep(&data, ",")) != NULL) {
182 substring_t args[MAX_OPT_ARGS]; 212 substring_t args[MAX_OPT_ARGS];
@@ -211,16 +241,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
211 return -EINVAL; 241 return -EINVAL;
212 opts->mode = option & S_IALLUGO; 242 opts->mode = option & S_IALLUGO;
213 break; 243 break;
214#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
215 case Opt_ptmxmode: 244 case Opt_ptmxmode:
216 if (match_octal(&args[0], &option)) 245 if (match_octal(&args[0], &option))
217 return -EINVAL; 246 return -EINVAL;
218 opts->ptmxmode = option & S_IALLUGO; 247 opts->ptmxmode = option & S_IALLUGO;
219 break; 248 break;
220 case Opt_newinstance: 249 case Opt_newinstance:
221 /* newinstance makes sense only on initial mount */
222 if (op == PARSE_MOUNT)
223 opts->newinstance = 1;
224 break; 250 break;
225 case Opt_max: 251 case Opt_max:
226 if (match_int(&args[0], &option) || 252 if (match_int(&args[0], &option) ||
@@ -228,7 +254,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
228 return -EINVAL; 254 return -EINVAL;
229 opts->max = option; 255 opts->max = option;
230 break; 256 break;
231#endif
232 default: 257 default:
233 pr_err("called with bogus options\n"); 258 pr_err("called with bogus options\n");
234 return -EINVAL; 259 return -EINVAL;
@@ -238,7 +263,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
238 return 0; 263 return 0;
239} 264}
240 265
241#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
242static int mknod_ptmx(struct super_block *sb) 266static int mknod_ptmx(struct super_block *sb)
243{ 267{
244 int mode; 268 int mode;
@@ -305,12 +329,6 @@ static void update_ptmx_mode(struct pts_fs_info *fsi)
305 inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; 329 inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
306 } 330 }
307} 331}
308#else
309static inline void update_ptmx_mode(struct pts_fs_info *fsi)
310{
311 return;
312}
313#endif
314 332
315static int devpts_remount(struct super_block *sb, int *flags, char *data) 333static int devpts_remount(struct super_block *sb, int *flags, char *data)
316{ 334{
@@ -344,11 +362,9 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root)
344 seq_printf(seq, ",gid=%u", 362 seq_printf(seq, ",gid=%u",
345 from_kgid_munged(&init_user_ns, opts->gid)); 363 from_kgid_munged(&init_user_ns, opts->gid));
346 seq_printf(seq, ",mode=%03o", opts->mode); 364 seq_printf(seq, ",mode=%03o", opts->mode);
347#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
348 seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); 365 seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode);
349 if (opts->max < NR_UNIX98_PTY_MAX) 366 if (opts->max < NR_UNIX98_PTY_MAX)
350 seq_printf(seq, ",max=%d", opts->max); 367 seq_printf(seq, ",max=%d", opts->max);
351#endif
352 368
353 return 0; 369 return 0;
354} 370}
@@ -410,40 +426,11 @@ fail:
410 return -ENOMEM; 426 return -ENOMEM;
411} 427}
412 428
413#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
414static int compare_init_pts_sb(struct super_block *s, void *p)
415{
416 if (devpts_mnt)
417 return devpts_mnt->mnt_sb == s;
418 return 0;
419}
420
421/* 429/*
422 * devpts_mount() 430 * devpts_mount()
423 * 431 *
424 * If the '-o newinstance' mount option was specified, mount a new 432 * Mount a new (private) instance of devpts. PTYs created in this
425 * (private) instance of devpts. PTYs created in this instance are 433 * instance are independent of the PTYs in other devpts instances.
426 * independent of the PTYs in other devpts instances.
427 *
428 * If the '-o newinstance' option was not specified, mount/remount the
429 * initial kernel mount of devpts. This type of mount gives the
430 * legacy, single-instance semantics.
431 *
432 * The 'newinstance' option is needed to support multiple namespace
433 * semantics in devpts while preserving backward compatibility of the
434 * current 'single-namespace' semantics. i.e all mounts of devpts
435 * without the 'newinstance' mount option should bind to the initial
436 * kernel mount, like mount_single().
437 *
438 * Mounts with 'newinstance' option create a new, private namespace.
439 *
440 * NOTE:
441 *
442 * For single-mount semantics, devpts cannot use mount_single(),
443 * because mount_single()/sget() find and use the super-block from
444 * the most recent mount of devpts. But that recent mount may be a
445 * 'newinstance' mount and mount_single() would pick the newinstance
446 * super-block instead of the initial super-block.
447 */ 434 */
448static struct dentry *devpts_mount(struct file_system_type *fs_type, 435static struct dentry *devpts_mount(struct file_system_type *fs_type,
449 int flags, const char *dev_name, void *data) 436 int flags, const char *dev_name, void *data)
@@ -456,18 +443,7 @@ static struct dentry *devpts_mount(struct file_system_type *fs_type,
456 if (error) 443 if (error)
457 return ERR_PTR(error); 444 return ERR_PTR(error);
458 445
459 /* Require newinstance for all user namespace mounts to ensure 446 s = sget(fs_type, NULL, set_anon_super, flags, NULL);
460 * the mount options are not changed.
461 */
462 if ((current_user_ns() != &init_user_ns) && !opts.newinstance)
463 return ERR_PTR(-EINVAL);
464
465 if (opts.newinstance)
466 s = sget(fs_type, NULL, set_anon_super, flags, NULL);
467 else
468 s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags,
469 NULL);
470
471 if (IS_ERR(s)) 447 if (IS_ERR(s))
472 return ERR_CAST(s); 448 return ERR_CAST(s);
473 449
@@ -491,18 +467,6 @@ out_undo_sget:
491 return ERR_PTR(error); 467 return ERR_PTR(error);
492} 468}
493 469
494#else
495/*
496 * This supports only the legacy single-instance semantics (no
497 * multiple-instance semantics)
498 */
499static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags,
500 const char *dev_name, void *data)
501{
502 return mount_single(fs_type, flags, data, devpts_fill_super);
503}
504#endif
505
506static void devpts_kill_sb(struct super_block *sb) 470static void devpts_kill_sb(struct super_block *sb)
507{ 471{
508 struct pts_fs_info *fsi = DEVPTS_SB(sb); 472 struct pts_fs_info *fsi = DEVPTS_SB(sb);
@@ -516,9 +480,7 @@ static struct file_system_type devpts_fs_type = {
516 .name = "devpts", 480 .name = "devpts",
517 .mount = devpts_mount, 481 .mount = devpts_mount,
518 .kill_sb = devpts_kill_sb, 482 .kill_sb = devpts_kill_sb,
519#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
520 .fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT, 483 .fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT,
521#endif
522}; 484};
523 485
524/* 486/*
@@ -531,16 +493,13 @@ int devpts_new_index(struct pts_fs_info *fsi)
531 int index; 493 int index;
532 int ida_ret; 494 int ida_ret;
533 495
534 if (!fsi)
535 return -ENODEV;
536
537retry: 496retry:
538 if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) 497 if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
539 return -ENOMEM; 498 return -ENOMEM;
540 499
541 mutex_lock(&allocated_ptys_lock); 500 mutex_lock(&allocated_ptys_lock);
542 if (pty_count >= pty_limit - 501 if (pty_count >= (pty_limit -
543 (fsi->mount_opts.newinstance ? pty_reserve : 0)) { 502 (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
544 mutex_unlock(&allocated_ptys_lock); 503 mutex_unlock(&allocated_ptys_lock);
545 return -ENOSPC; 504 return -ENOSPC;
546 } 505 }
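To make the reworked limit check concrete: assuming the usual defaults of pty_limit = 4096 and pty_reserve = 1024 (both are sysctl-tunable, so treat these numbers as illustrative), a mount with reserve == 0 starts getting -ENOSPC from devpts_new_index() once pty_count reaches 4096 - 1024 = 3072, while a mount made from the initial mount namespace (reserve == 1) can keep allocating up to the full 4096.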
@@ -571,30 +530,6 @@ void devpts_kill_index(struct pts_fs_info *fsi, int idx)
571 mutex_unlock(&allocated_ptys_lock); 530 mutex_unlock(&allocated_ptys_lock);
572} 531}
573 532
574/*
575 * pty code needs to hold extra references in case of last /dev/tty close
576 */
577struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
578{
579 struct super_block *sb;
580 struct pts_fs_info *fsi;
581
582 sb = pts_sb_from_inode(ptmx_inode);
583 if (!sb)
584 return NULL;
585 fsi = DEVPTS_SB(sb);
586 if (!fsi)
587 return NULL;
588
589 atomic_inc(&sb->s_active);
590 return fsi;
591}
592
593void devpts_put_ref(struct pts_fs_info *fsi)
594{
595 deactivate_super(fsi->sb);
596}
597
598/** 533/**
599 * devpts_pty_new -- create a new inode in /dev/pts/ 534 * devpts_pty_new -- create a new inode in /dev/pts/
600 * @ptmx_inode: inode of the master 535 * @ptmx_inode: inode of the master
@@ -607,16 +542,12 @@ void devpts_put_ref(struct pts_fs_info *fsi)
607struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) 542struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
608{ 543{
609 struct dentry *dentry; 544 struct dentry *dentry;
610 struct super_block *sb; 545 struct super_block *sb = fsi->sb;
611 struct inode *inode; 546 struct inode *inode;
612 struct dentry *root; 547 struct dentry *root;
613 struct pts_mount_opts *opts; 548 struct pts_mount_opts *opts;
614 char s[12]; 549 char s[12];
615 550
616 if (!fsi)
617 return ERR_PTR(-ENODEV);
618
619 sb = fsi->sb;
620 root = sb->s_root; 551 root = sb->s_root;
621 opts = &fsi->mount_opts; 552 opts = &fsi->mount_opts;
622 553
@@ -676,20 +607,8 @@ void devpts_pty_kill(struct dentry *dentry)
676static int __init init_devpts_fs(void) 607static int __init init_devpts_fs(void)
677{ 608{
678 int err = register_filesystem(&devpts_fs_type); 609 int err = register_filesystem(&devpts_fs_type);
679 struct ctl_table_header *table;
680
681 if (!err) { 610 if (!err) {
682 struct vfsmount *mnt; 611 register_sysctl_table(pty_root_table);
683
684 table = register_sysctl_table(pty_root_table);
685 mnt = kern_mount(&devpts_fs_type);
686 if (IS_ERR(mnt)) {
687 err = PTR_ERR(mnt);
688 unregister_filesystem(&devpts_fs_type);
689 unregister_sysctl_table(table);
690 } else {
691 devpts_mnt = mnt;
692 }
693 } 612 }
694 return err; 613 return err;
695} 614}
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 3078b679fcd1..c8c4f79c7ce1 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -887,6 +887,8 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
887 put_page(results[i]); 887 put_page(results[i]);
888 } 888 }
889 889
890 wake_up_bit(&cookie->flags, 0);
891
890 _leave(""); 892 _leave("");
891} 893}
892 894
diff --git a/fs/namei.c b/fs/namei.c
index 4c4f95ac8aa5..6a82fb7e2127 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1416,21 +1416,28 @@ static void follow_mount(struct path *path)
1416 } 1416 }
1417} 1417}
1418 1418
1419static int path_parent_directory(struct path *path)
1420{
1421 struct dentry *old = path->dentry;
1422 /* rare case of legitimate dget_parent()... */
1423 path->dentry = dget_parent(path->dentry);
1424 dput(old);
1425 if (unlikely(!path_connected(path)))
1426 return -ENOENT;
1427 return 0;
1428}
1429
1419static int follow_dotdot(struct nameidata *nd) 1430static int follow_dotdot(struct nameidata *nd)
1420{ 1431{
1421 while(1) { 1432 while(1) {
1422 struct dentry *old = nd->path.dentry;
1423
1424 if (nd->path.dentry == nd->root.dentry && 1433 if (nd->path.dentry == nd->root.dentry &&
1425 nd->path.mnt == nd->root.mnt) { 1434 nd->path.mnt == nd->root.mnt) {
1426 break; 1435 break;
1427 } 1436 }
1428 if (nd->path.dentry != nd->path.mnt->mnt_root) { 1437 if (nd->path.dentry != nd->path.mnt->mnt_root) {
1429 /* rare case of legitimate dget_parent()... */ 1438 int ret = path_parent_directory(&nd->path);
1430 nd->path.dentry = dget_parent(nd->path.dentry); 1439 if (ret)
1431 dput(old); 1440 return ret;
1432 if (unlikely(!path_connected(&nd->path)))
1433 return -ENOENT;
1434 break; 1441 break;
1435 } 1442 }
1436 if (!follow_up(&nd->path)) 1443 if (!follow_up(&nd->path))
@@ -2514,6 +2521,34 @@ struct dentry *lookup_one_len_unlocked(const char *name,
2514} 2521}
2515EXPORT_SYMBOL(lookup_one_len_unlocked); 2522EXPORT_SYMBOL(lookup_one_len_unlocked);
2516 2523
2524#ifdef CONFIG_UNIX98_PTYS
2525int path_pts(struct path *path)
2526{
2527 /* Find something mounted on "pts" in the same directory as
2528 * the input path.
2529 */
2530 struct dentry *child, *parent;
2531 struct qstr this;
2532 int ret;
2533
2534 ret = path_parent_directory(path);
2535 if (ret)
2536 return ret;
2537
2538 parent = path->dentry;
2539 this.name = "pts";
2540 this.len = 3;
2541 child = d_hash_and_lookup(parent, &this);
2542 if (!child)
2543 return -ENOENT;
2544
2545 path->dentry = child;
2546 dput(parent);
2547 follow_mount(path);
2548 return 0;
2549}
2550#endif
2551
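As a rough usage sketch (mirroring the devpts_acquire() caller above; error handling abbreviated), hopping from an opened /dev/ptmx to the adjacent /dev/pts mount looks like:

	struct path path = filp->f_path;

	path_get(&path);
	/* resolve the "pts" entry next to the opened node and follow
	 * whatever is mounted on it */
	if (!path_pts(&path) &&
	    path.mnt->mnt_sb->s_magic == DEVPTS_SUPER_MAGIC) {
		/* path now points at the root of a devpts instance */
	}
	path_put(&path);
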
2517int user_path_at_empty(int dfd, const char __user *name, unsigned flags, 2552int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
2518 struct path *path, int *empty) 2553 struct path *path, int *empty)
2519{ 2554{
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 70a41f742037..5731ccb42585 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -51,7 +51,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
51 */ 51 */
52extern bool acpi_video_handles_brightness_key_presses(void); 52extern bool acpi_video_handles_brightness_key_presses(void);
53extern int acpi_video_get_levels(struct acpi_device *device, 53extern int acpi_video_get_levels(struct acpi_device *device,
54 struct acpi_video_device_brightness **dev_br); 54 struct acpi_video_device_brightness **dev_br,
55 int *pmax_level);
55#else 56#else
56static inline int acpi_video_register(void) { return 0; } 57static inline int acpi_video_register(void) { return 0; }
57static inline void acpi_video_unregister(void) { return; } 58static inline void acpi_video_unregister(void) { return; }
@@ -72,7 +73,8 @@ static inline bool acpi_video_handles_brightness_key_presses(void)
72 return false; 73 return false;
73} 74}
74static inline int acpi_video_get_levels(struct acpi_device *device, 75static inline int acpi_video_get_levels(struct acpi_device *device,
75 struct acpi_video_device_brightness **dev_br) 76 struct acpi_video_device_brightness **dev_br,
77 int *pmax_level)
76{ 78{
77 return -ENODEV; 79 return -ENODEV;
78} 80}
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 84f1a8eefbdb..04310cb08111 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -52,10 +52,12 @@
52#include <linux/poll.h> 52#include <linux/poll.h>
53#include <linux/ratelimit.h> 53#include <linux/ratelimit.h>
54#include <linux/sched.h> 54#include <linux/sched.h>
55#include <linux/seqlock.h>
55#include <linux/slab.h> 56#include <linux/slab.h>
56#include <linux/types.h> 57#include <linux/types.h>
57#include <linux/vmalloc.h> 58#include <linux/vmalloc.h>
58#include <linux/workqueue.h> 59#include <linux/workqueue.h>
60#include <linux/fence.h>
59 61
60#include <asm/mman.h> 62#include <asm/mman.h>
61#include <asm/pgalloc.h> 63#include <asm/pgalloc.h>
@@ -66,6 +68,7 @@
66 68
67#include <drm/drm_agpsupport.h> 69#include <drm/drm_agpsupport.h>
68#include <drm/drm_crtc.h> 70#include <drm/drm_crtc.h>
71#include <drm/drm_fourcc.h>
69#include <drm/drm_global.h> 72#include <drm/drm_global.h>
70#include <drm/drm_hashtab.h> 73#include <drm/drm_hashtab.h>
71#include <drm/drm_mem_util.h> 74#include <drm/drm_mem_util.h>
@@ -281,13 +284,14 @@ struct drm_ioctl_desc {
281 284
282/* Event queued up for userspace to read */ 285/* Event queued up for userspace to read */
283struct drm_pending_event { 286struct drm_pending_event {
287 struct completion *completion;
284 struct drm_event *event; 288 struct drm_event *event;
289 struct fence *fence;
285 struct list_head link; 290 struct list_head link;
286 struct list_head pending_link; 291 struct list_head pending_link;
287 struct drm_file *file_priv; 292 struct drm_file *file_priv;
288 pid_t pid; /* pid of requester, no guarantee it's valid by the time 293 pid_t pid; /* pid of requester, no guarantee it's valid by the time
289 we deliver the event, for tracing only */ 294 we deliver the event, for tracing only */
290 void (*destroy)(struct drm_pending_event *event);
291}; 295};
292 296
293/* initial implementation using a linked list - todo hashtab */ 297
@@ -392,11 +396,6 @@ struct drm_master {
392 void *driver_priv; 396 void *driver_priv;
393}; 397};
394 398
395/* Size of ringbuffer for vblank timestamps. Just double-buffer
396 * in initial implementation.
397 */
398#define DRM_VBLANKTIME_RBSIZE 2
399
400/* Flags and return codes for get_vblank_timestamp() driver function. */ 399/* Flags and return codes for get_vblank_timestamp() driver function. */
401#define DRM_CALLED_FROM_VBLIRQ 1 400#define DRM_CALLED_FROM_VBLIRQ 1
402#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) 401#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
@@ -420,8 +419,6 @@ struct drm_driver {
420 void (*postclose) (struct drm_device *, struct drm_file *); 419 void (*postclose) (struct drm_device *, struct drm_file *);
421 void (*lastclose) (struct drm_device *); 420 void (*lastclose) (struct drm_device *);
422 int (*unload) (struct drm_device *); 421 int (*unload) (struct drm_device *);
423 int (*suspend) (struct drm_device *, pm_message_t state);
424 int (*resume) (struct drm_device *);
425 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); 422 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
426 int (*dma_quiescent) (struct drm_device *); 423 int (*dma_quiescent) (struct drm_device *);
427 int (*context_dtor) (struct drm_device *dev, int context); 424 int (*context_dtor) (struct drm_device *dev, int context);
@@ -434,7 +431,7 @@ struct drm_driver {
434 * 431 *
435 * Driver callback for fetching a raw hardware vblank counter for @crtc. 432 * Driver callback for fetching a raw hardware vblank counter for @crtc.
436 * If a device doesn't have a hardware counter, the driver can simply 433 * If a device doesn't have a hardware counter, the driver can simply
437 * return the value of drm_vblank_count. The DRM core will account for 434 * use drm_vblank_no_hw_counter() function. The DRM core will account for
438 * missed vblank events while interrupts were disabled based on system 435
439 * timestamps. 436 * timestamps.
440 * 437 *
@@ -452,8 +449,8 @@ struct drm_driver {
452 * @pipe: which irq to enable 449 * @pipe: which irq to enable
453 * 450 *
454 * Enable vblank interrupts for @crtc. If the device doesn't have 451 * Enable vblank interrupts for @crtc. If the device doesn't have
455 * a hardware vblank counter, this routine should be a no-op, since 452 * a hardware vblank counter, the driver should use the
456 * interrupts will have to stay on to keep the count accurate. 453 * drm_vblank_no_hw_counter() function that keeps a virtual counter.
457 * 454 *
458 * RETURNS 455 * RETURNS
459 * Zero on success, appropriate errno if the given @crtc's vblank 456 * Zero on success, appropriate errno if the given @crtc's vblank
@@ -467,8 +464,8 @@ struct drm_driver {
467 * @pipe: which irq to enable 464 * @pipe: which irq to enable
468 * 465 *
469 * Disable vblank interrupts for @crtc. If the device doesn't have 466 * Disable vblank interrupts for @crtc. If the device doesn't have
470 * a hardware vblank counter, this routine should be a no-op, since 467 * a hardware vblank counter, the driver should use the
471 * interrupts will have to stay on to keep the count accurate. 468 * drm_vblank_no_hw_counter() function that keeps a virtual counter.
472 */ 469 */
473 void (*disable_vblank) (struct drm_device *dev, unsigned int pipe); 470 void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
474 471
@@ -725,10 +722,10 @@ struct drm_vblank_crtc {
725 wait_queue_head_t queue; /**< VBLANK wait queue */ 722 wait_queue_head_t queue; /**< VBLANK wait queue */
726 struct timer_list disable_timer; /* delayed disable timer */ 723 struct timer_list disable_timer; /* delayed disable timer */
727 724
728 /* vblank counter, protected by dev->vblank_time_lock for writes */ 725 seqlock_t seqlock; /* protects vblank count and time */
729 u32 count; 726
730 /* vblank timestamps, protected by dev->vblank_time_lock for writes */ 727 u32 count; /* vblank counter */
731 struct timeval time[DRM_VBLANKTIME_RBSIZE]; 728 struct timeval time; /* vblank timestamp */
732 729
733 atomic_t refcount; /* number of users of vblank interrupts per crtc */ 730
734 u32 last; /* protected by dev->vbl_lock, used */ 731 u32 last; /* protected by dev->vbl_lock, used */
@@ -972,18 +969,12 @@ extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
972 struct timeval *vblanktime); 969 struct timeval *vblanktime);
973extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, 970extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
974 struct timeval *vblanktime); 971 struct timeval *vblanktime);
975extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
976 struct drm_pending_vblank_event *e);
977extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, 972extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
978 struct drm_pending_vblank_event *e); 973 struct drm_pending_vblank_event *e);
979extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
980 struct drm_pending_vblank_event *e);
981extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, 974extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
982 struct drm_pending_vblank_event *e); 975 struct drm_pending_vblank_event *e);
983extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); 976extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
984extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); 977extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
985extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
986extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
987extern int drm_crtc_vblank_get(struct drm_crtc *crtc); 978extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
988extern void drm_crtc_vblank_put(struct drm_crtc *crtc); 979extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
989extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); 980extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
@@ -994,6 +985,7 @@ extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
994extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); 985extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
995extern void drm_crtc_vblank_on(struct drm_crtc *crtc); 986extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
996extern void drm_vblank_cleanup(struct drm_device *dev); 987extern void drm_vblank_cleanup(struct drm_device *dev);
988extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
997extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe); 989extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
998 990
999extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 991extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 92c84e9ab09a..856a9c85a838 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -30,6 +30,12 @@
30 30
31#include <drm/drm_crtc.h> 31#include <drm/drm_crtc.h>
32 32
33void drm_crtc_commit_put(struct drm_crtc_commit *commit);
34static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
35{
36 kref_get(&commit->ref);
37}
38
33struct drm_atomic_state * __must_check 39struct drm_atomic_state * __must_check
34drm_atomic_state_alloc(struct drm_device *dev); 40drm_atomic_state_alloc(struct drm_device *dev);
35void drm_atomic_state_clear(struct drm_atomic_state *state); 41void drm_atomic_state_clear(struct drm_atomic_state *state);
@@ -71,7 +77,7 @@ static inline struct drm_crtc_state *
71drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state, 77drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
72 struct drm_crtc *crtc) 78 struct drm_crtc *crtc)
73{ 79{
74 return state->crtc_states[drm_crtc_index(crtc)]; 80 return state->crtcs[drm_crtc_index(crtc)].state;
75} 81}
76 82
77/** 83/**
@@ -86,7 +92,7 @@ static inline struct drm_plane_state *
86drm_atomic_get_existing_plane_state(struct drm_atomic_state *state, 92drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
87 struct drm_plane *plane) 93 struct drm_plane *plane)
88{ 94{
89 return state->plane_states[drm_plane_index(plane)]; 95 return state->planes[drm_plane_index(plane)].state;
90} 96}
91 97
92/** 98/**
@@ -106,7 +112,43 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
106 if (index >= state->num_connector) 112 if (index >= state->num_connector)
107 return NULL; 113 return NULL;
108 114
109 return state->connector_states[index]; 115 return state->connectors[index].state;
116}
117
118/**
119 * __drm_atomic_get_current_plane_state - get current plane state
120 * @state: global atomic state object
121 * @plane: plane to grab
122 *
123 * This function returns the plane state for the given plane, either from
124 * @state, or if the plane isn't part of the atomic state update, from @plane.
125 * This is useful in atomic check callbacks, when drivers need to peek at, but
126 * not change, the state of other planes, since it avoids threading an error code
127 * back up the call chain.
128 *
129 * WARNING:
130 *
131 * Note that this function is in general unsafe since it doesn't check for the
132 * required locking for access state structures. Drivers must ensure that it is
133 * safe to access the returned state structure through other means. One common
134 * example is when planes are fixed to a single CRTC, and the driver knows that
135 * the CRTC lock is held already. In that case holding the CRTC lock gives a
136 * read-lock on all planes connected to that CRTC. But if planes can be
137 * reassigned things get more tricky. In that case it's better to use
138 * drm_atomic_get_plane_state and wire up full error handling.
139 *
140 * Returns:
141 *
142 * Read-only pointer to the current plane state.
143 */
144static inline const struct drm_plane_state *
145__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
146 struct drm_plane *plane)
147{
148 if (state->planes[drm_plane_index(plane)].state)
149 return state->planes[drm_plane_index(plane)].state;
150
151 return plane->state;
110} 152}
111 153
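A hedged example of the intended call pattern from an ->atomic_check() hook (the cursor-plane peek is illustrative and assumes the CRTC lock already covers that plane, per the WARNING above):

	const struct drm_plane_state *cursor_state =
		__drm_atomic_get_current_plane_state(state, crtc->cursor);

	if (cursor_state && cursor_state->fb) {
		/* validate the proposed update against the cursor fb
		 * without pulling the cursor plane into this commit */
	}
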
112int __must_check 154int __must_check
@@ -139,29 +181,39 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
139int __must_check drm_atomic_commit(struct drm_atomic_state *state); 181int __must_check drm_atomic_commit(struct drm_atomic_state *state);
140int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); 182int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
141 183
142#define for_each_connector_in_state(state, connector, connector_state, __i) \ 184#define for_each_connector_in_state(__state, connector, connector_state, __i) \
143 for ((__i) = 0; \ 185 for ((__i) = 0; \
144 (__i) < (state)->num_connector && \ 186 (__i) < (__state)->num_connector && \
145 ((connector) = (state)->connectors[__i], \ 187 ((connector) = (__state)->connectors[__i].ptr, \
146 (connector_state) = (state)->connector_states[__i], 1); \ 188 (connector_state) = (__state)->connectors[__i].state, 1); \
147 (__i)++) \ 189 (__i)++) \
148 for_each_if (connector) 190 for_each_if (connector)
149 191
150#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \ 192#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
151 for ((__i) = 0; \ 193 for ((__i) = 0; \
152 (__i) < (state)->dev->mode_config.num_crtc && \ 194 (__i) < (__state)->dev->mode_config.num_crtc && \
153 ((crtc) = (state)->crtcs[__i], \ 195 ((crtc) = (__state)->crtcs[__i].ptr, \
154 (crtc_state) = (state)->crtc_states[__i], 1); \ 196 (crtc_state) = (__state)->crtcs[__i].state, 1); \
155 (__i)++) \ 197 (__i)++) \
156 for_each_if (crtc_state) 198 for_each_if (crtc_state)
157 199
158#define for_each_plane_in_state(state, plane, plane_state, __i) \ 200#define for_each_plane_in_state(__state, plane, plane_state, __i) \
159 for ((__i) = 0; \ 201 for ((__i) = 0; \
160 (__i) < (state)->dev->mode_config.num_total_plane && \ 202 (__i) < (__state)->dev->mode_config.num_total_plane && \
161 ((plane) = (state)->planes[__i], \ 203 ((plane) = (__state)->planes[__i].ptr, \
162 (plane_state) = (state)->plane_states[__i], 1); \ 204 (plane_state) = (__state)->planes[__i].state, 1); \
163 (__i)++) \ 205 (__i)++) \
164 for_each_if (plane_state) 206 for_each_if (plane_state)
207
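Callers are unaffected by the rename to __state; only the backing storage moved into the per-object wrapper structures. A minimal sketch:

	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i;

	/* walks the state->planes[i].ptr / state->planes[i].state pairs */
	for_each_plane_in_state(state, plane, plane_state, i)
		DRM_DEBUG_ATOMIC("[PLANE:%s] part of this update\n",
				 plane->name);
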
208/**
209 * drm_atomic_crtc_needs_modeset - compute combined modeset need
210 * @state: &drm_crtc_state for the CRTC
211 *
212 * To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
213 * whether the state CRTC changed enough to need a full modeset cycle:
214 * connectors_changed, mode_changed and active_changed. This helper simply
215 * combines these three to compute the overall need for a modeset for @state.
216 */
165static inline bool 217static inline bool
166drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) 218drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
167{ 219{
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d473dcc91f54..d86ae5dcd7b4 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -38,6 +38,7 @@ int drm_atomic_helper_check_planes(struct drm_device *dev,
38 struct drm_atomic_state *state); 38 struct drm_atomic_state *state);
39int drm_atomic_helper_check(struct drm_device *dev, 39int drm_atomic_helper_check(struct drm_device *dev,
40 struct drm_atomic_state *state); 40 struct drm_atomic_state *state);
41void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
41int drm_atomic_helper_commit(struct drm_device *dev, 42int drm_atomic_helper_commit(struct drm_device *dev,
42 struct drm_atomic_state *state, 43 struct drm_atomic_state *state,
43 bool nonblock); 44 bool nonblock);
@@ -71,8 +72,15 @@ void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_sta
71void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, 72void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
72 bool atomic); 73 bool atomic);
73 74
74void drm_atomic_helper_swap_state(struct drm_device *dev, 75void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
75 struct drm_atomic_state *state); 76 bool stall);
77
78/* nonblocking commit helpers */
79int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
80 bool nonblock);
81void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
82void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
83void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
76 84
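A hedged sketch of how the new synchronization helpers compose in a commit tail, essentially what the default drm_atomic_helper_commit_tail() does (the modeset/plane ordering is driver-specific):

	static void example_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		drm_atomic_helper_commit_modeset_disables(dev, state);
		drm_atomic_helper_commit_planes(dev, state, false);
		drm_atomic_helper_commit_modeset_enables(dev, state);

		/* registers written: lets the next commit proceed past
		 * drm_atomic_helper_wait_for_dependencies() */
		drm_atomic_helper_commit_hw_done(state);

		drm_atomic_helper_wait_for_vblanks(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);
		/* the core then signals cleanup_done via
		 * drm_atomic_helper_commit_cleanup_done() */
	}
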
77/* implementations for legacy interfaces */ 85/* implementations for legacy interfaces */
78int drm_atomic_helper_update_plane(struct drm_plane *plane, 86int drm_atomic_helper_update_plane(struct drm_plane *plane,
@@ -147,9 +155,9 @@ void
147__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); 155__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
148void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, 156void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
149 struct drm_connector_state *state); 157 struct drm_connector_state *state);
150void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, 158int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
151 u16 *red, u16 *green, u16 *blue, 159 u16 *red, u16 *green, u16 *blue,
152 uint32_t start, uint32_t size); 160 uint32_t size);
153 161
154/** 162/**
155 * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC 163 * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -159,7 +167,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
159 * This iterates over the current state, useful (for example) when applying 167 * This iterates over the current state, useful (for example) when applying
160 * atomic state after it has been checked and swapped. To iterate over the 168 * atomic state after it has been checked and swapped. To iterate over the
161 * planes which *will* be attached (for ->atomic_check()) see 169 * planes which *will* be attached (for ->atomic_check()) see
162 * drm_crtc_for_each_pending_plane() 170 * drm_atomic_crtc_state_for_each_plane().
163 */ 171 */
164#define drm_atomic_crtc_for_each_plane(plane, crtc) \ 172#define drm_atomic_crtc_for_each_plane(plane, crtc) \
165 drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask) 173 drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
@@ -171,11 +179,31 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
171 * 179 *
172 * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be 180 * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
173 * attached if the specified state is applied. Useful during (for example) 181 * attached if the specified state is applied. Useful during (for example)
174 * ->atomic_check() operations, to validate the incoming state 182 * ->atomic_check() operations, to validate the incoming state.
175 */ 183 */
176#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ 184#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
177 drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) 185 drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
178 186
187/**
188 * drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
189 * @plane: the loop cursor
190 * @plane_state: loop cursor for the plane's state, must be const
191 * @crtc_state: the incoming crtc-state
192 *
193 * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
194 * attached if the specified state is applied. Useful during (for example)
195 * ->atomic_check() operations, to validate the incoming state.
196 *
197 * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
198 * const plane_state. This is useful when a driver just wants to peek at other
199 * active planes on this crtc, but does not need to change it.
200 */
201#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
202 drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
203 for_each_if ((plane_state = \
204 __drm_atomic_get_current_plane_state((crtc_state)->state, \
205 plane)))
206
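For illustration, peeking at every plane that would sit on a CRTC after the update, as a pure read that adds nothing to the commit:

	struct drm_plane *plane;
	const struct drm_plane_state *plane_state;

	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state,
						   crtc_state) {
		if (plane_state->fb)
			DRM_DEBUG_ATOMIC("[PLANE:%s] keeps a framebuffer\n",
					 plane->name);
	}
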
179/* 207/*
180 * drm_atomic_plane_disabling - check whether a plane is being disabled 208 * drm_atomic_plane_disabling - check whether a plane is being disabled
181 * @plane: plane object 209 * @plane: plane object
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index d1559cd04e3d..914baa8c161d 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -253,6 +253,8 @@ struct drm_framebuffer {
253 int bits_per_pixel; 253 int bits_per_pixel;
254 int flags; 254 int flags;
255 uint32_t pixel_format; /* fourcc format */ 255 uint32_t pixel_format; /* fourcc format */
256 int hot_x;
257 int hot_y;
256 struct list_head filp_head; 258 struct list_head filp_head;
257}; 259};
258 260
@@ -314,6 +316,7 @@ struct drm_plane_helper_funcs;
314 * update to ensure framebuffer cleanup isn't done too early 316 * update to ensure framebuffer cleanup isn't done too early
315 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings 317 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
316 * @mode: current mode timings 318 * @mode: current mode timings
319 * @mode_blob: &drm_property_blob for @mode
317 * @degamma_lut: Lookup table for converting framebuffer pixel data 320 * @degamma_lut: Lookup table for converting framebuffer pixel data
318 * before apply the conversion matrix 321 * before apply the conversion matrix
319 * @ctm: Transformation matrix 322 * @ctm: Transformation matrix
@@ -478,8 +481,8 @@ struct drm_crtc_funcs {
478 * going on, which should eventually be unified to just one set of 481 * going on, which should eventually be unified to just one set of
479 * hooks. 482 * hooks.
480 */ 483 */
481 void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 484 int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
482 uint32_t start, uint32_t size); 485 uint32_t size);
483 486
484 /** 487 /**
485 * @destroy: 488 * @destroy:
@@ -708,6 +711,7 @@ struct drm_crtc_funcs {
708 * @dev: parent DRM device 711 * @dev: parent DRM device
709 * @port: OF node used by drm_of_find_possible_crtcs() 712 * @port: OF node used by drm_of_find_possible_crtcs()
710 * @head: list management 713 * @head: list management
714 * @name: human readable name, can be overwritten by the driver
711 * @mutex: per-CRTC locking 715 * @mutex: per-CRTC locking
712 * @base: base KMS object for ID tracking etc. 716 * @base: base KMS object for ID tracking etc.
713 * @primary: primary plane for this CRTC 717 * @primary: primary plane for this CRTC
@@ -724,9 +728,6 @@ struct drm_crtc_funcs {
724 * @gamma_store: gamma ramp values 728 * @gamma_store: gamma ramp values
725 * @helper_private: mid-layer private data 729 * @helper_private: mid-layer private data
726 * @properties: property tracking for this CRTC 730 * @properties: property tracking for this CRTC
727 * @state: current atomic state for this CRTC
728 * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
729 * legacy IOCTLs
730 * 731 *
731 * Each CRTC may have one or more connectors associated with it. This structure 732 * Each CRTC may have one or more connectors associated with it. This structure
732 * allows the CRTC to be controlled. 733 * allows the CRTC to be controlled.
@@ -738,12 +739,13 @@ struct drm_crtc {
738 739
739 char *name; 740 char *name;
740 741
741 /* 742 /**
742 * crtc mutex 743 * @mutex:
743 * 744 *
744 * This provides a read lock for the overall crtc state (mode, dpms 745 * This provides a read lock for the overall crtc state (mode, dpms
745 * state, ...) and a write lock for everything which can be updated 746
746 * without a full modeset (fb, cursor data, ...) 747 * without a full modeset (fb, cursor data, crtc properties ...). Full
748 * modesets also need to grab dev->mode_config.connection_mutex.
747 */ 749 */
748 struct drm_modeset_lock mutex; 750 struct drm_modeset_lock mutex;
749 751
@@ -753,6 +755,9 @@ struct drm_crtc {
753 struct drm_plane *primary; 755 struct drm_plane *primary;
754 struct drm_plane *cursor; 756 struct drm_plane *cursor;
755 757
758 /* position inside the mode_config.list, can be used as a [] idx */
759 unsigned index;
760
756 /* position of cursor plane on crtc */ 761 /* position of cursor plane on crtc */
757 int cursor_x; 762 int cursor_x;
758 int cursor_y; 763 int cursor_y;
@@ -779,11 +784,37 @@ struct drm_crtc {
779 784
780 struct drm_object_properties properties; 785 struct drm_object_properties properties;
781 786
787 /**
788 * @state:
789 *
790 * Current atomic state for this CRTC.
791 */
782 struct drm_crtc_state *state; 792 struct drm_crtc_state *state;
783 793
784 /* 794 /**
785 * For legacy crtc IOCTLs so that atomic drivers can get at the locking 795 * @commit_list:
786 * acquire context. 796 *
797 * List of &drm_crtc_commit structures tracking pending commits.
798 * Protected by @commit_lock. This list doesn't hold its own full
799 * reference, but borrows it from the ongoing commit. Commit entries
800 * must be removed from this list once the commit is fully completed,
801 * but before its corresponding &drm_atomic_state gets destroyed.
802 */
803 struct list_head commit_list;
804
805 /**
806 * @commit_lock:
807 *
808 * Spinlock to protect @commit_list.
809 */
810 spinlock_t commit_lock;
811
812 /**
813 * @acquire_ctx:
814 *
815 * Per-CRTC implicit acquire context used by atomic drivers for legacy
816 * IOCTLs, so that atomic drivers can get at the locking acquire
817 * context.
787 */ 818 */
788 struct drm_modeset_acquire_ctx *acquire_ctx; 819 struct drm_modeset_acquire_ctx *acquire_ctx;
789}; 820};
@@ -1078,7 +1109,7 @@ struct drm_encoder_funcs {
1078 * @dev: parent DRM device 1109 * @dev: parent DRM device
1079 * @head: list management 1110 * @head: list management
1080 * @base: base KMS object 1111 * @base: base KMS object
1081 * @name: encoder name 1112 * @name: human readable name, can be overwritten by the driver
1082 * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h 1113 * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
1083 * @possible_crtcs: bitmask of potential CRTC bindings 1114 * @possible_crtcs: bitmask of potential CRTC bindings
1084 * @possible_clones: bitmask of potential sibling encoders for cloning 1115 * @possible_clones: bitmask of potential sibling encoders for cloning
@@ -1097,6 +1128,10 @@ struct drm_encoder {
1097 struct drm_mode_object base; 1128 struct drm_mode_object base;
1098 char *name; 1129 char *name;
1099 int encoder_type; 1130 int encoder_type;
1131
1132 /* position inside the mode_config.list, can be used as a [] idx */
1133 unsigned index;
1134
1100 uint32_t possible_crtcs; 1135 uint32_t possible_crtcs;
1101 uint32_t possible_clones; 1136 uint32_t possible_clones;
1102 1137
@@ -1124,7 +1159,8 @@ struct drm_encoder {
1124 * @attr: sysfs attributes 1159 * @attr: sysfs attributes
1125 * @head: list management 1160 * @head: list management
1126 * @base: base KMS object 1161 * @base: base KMS object
1127 * @name: connector name 1162 * @name: human readable name, can be overwritten by the driver
1163 * @connector_id: compacted connector id, useful for indexing arrays
1128 * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h 1164 * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
1129 * @connector_type_id: index into connector type enum 1165 * @connector_type_id: index into connector type enum
1130 * @interlace_allowed: can this connector handle interlaced modes? 1166 * @interlace_allowed: can this connector handle interlaced modes?
@@ -1137,7 +1173,6 @@ struct drm_encoder {
1137 * @funcs: connector control functions 1173 * @funcs: connector control functions
1138 * @edid_blob_ptr: DRM property containing EDID if present 1174 * @edid_blob_ptr: DRM property containing EDID if present
1139 * @properties: property tracking for this connector 1175 * @properties: property tracking for this connector
1140 * @path_blob_ptr: DRM blob property data for the DP MST path property
1141 * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling 1176 * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
1142 * @dpms: current dpms state 1177 * @dpms: current dpms state
1143 * @helper_private: mid-layer private data 1178 * @helper_private: mid-layer private data
@@ -1200,8 +1235,23 @@ struct drm_connector {
1200 struct drm_property_blob *edid_blob_ptr; 1235 struct drm_property_blob *edid_blob_ptr;
1201 struct drm_object_properties properties; 1236 struct drm_object_properties properties;
1202 1237
1238 /**
1239 * @path_blob_ptr:
1240 *
1241 * DRM blob property data for the DP MST path property.
1242 */
1203 struct drm_property_blob *path_blob_ptr; 1243 struct drm_property_blob *path_blob_ptr;
1204 1244
1245 /**
1246 * @tile_blob_ptr:
1247 *
1248 * DRM blob property data for the tile property (used mostly by DP MST).
1249 * This is meant for screens which are driven through separate display
1250 * pipelines represented by &drm_crtc, which might not be running with
1251 * genlocked clocks. For tiled panels which are genlocked, like
1252 * dual-link LVDS or dual-link DSI, the driver should try to not expose
1253 * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
1254 */
1205 struct drm_property_blob *tile_blob_ptr; 1255 struct drm_property_blob *tile_blob_ptr;
1206 1256
1207 uint8_t polled; /* DRM_CONNECTOR_POLL_* */ 1257 uint8_t polled; /* DRM_CONNECTOR_POLL_* */
@@ -1263,6 +1313,7 @@ struct drm_connector {
1263 * plane (in 16.16) 1313 * plane (in 16.16)
1264 * @src_w: width of visible portion of plane (in 16.16) 1314 * @src_w: width of visible portion of plane (in 16.16)
1265 * @src_h: height of visible portion of plane (in 16.16) 1315 * @src_h: height of visible portion of plane (in 16.16)
1316 * @rotation: rotation of the plane
1266 * @state: backpointer to global drm_atomic_state 1317 * @state: backpointer to global drm_atomic_state
1267 */ 1318 */
1268struct drm_plane_state { 1319struct drm_plane_state {
@@ -1503,6 +1554,7 @@ enum drm_plane_type {
1503 * struct drm_plane - central DRM plane control structure 1554 * struct drm_plane - central DRM plane control structure
1504 * @dev: DRM device this plane belongs to 1555 * @dev: DRM device this plane belongs to
1505 * @head: for list management 1556 * @head: for list management
1557 * @name: human readable name, can be overwritten by the driver
1506 * @base: base mode object 1558 * @base: base mode object
1507 * @possible_crtcs: pipes this plane can be bound to 1559 * @possible_crtcs: pipes this plane can be bound to
1508 * @format_types: array of formats supported by this plane 1560 * @format_types: array of formats supported by this plane
@@ -1516,6 +1568,7 @@ enum drm_plane_type {
1516 * @properties: property tracking for this plane 1568 * @properties: property tracking for this plane
1517 * @type: type of plane (overlay, primary, cursor) 1569 * @type: type of plane (overlay, primary, cursor)
1518 * @state: current atomic state for this plane 1570 * @state: current atomic state for this plane
1571 * @helper_private: mid-layer private data
1519 */ 1572 */
1520struct drm_plane { 1573struct drm_plane {
1521 struct drm_device *dev; 1574 struct drm_device *dev;
@@ -1523,6 +1576,13 @@ struct drm_plane {
1523 1576
1524 char *name; 1577 char *name;
1525 1578
1579 /**
1580 * @mutex:
1581 *
1582 * Protects modeset plane state, together with the mutex of &drm_crtc
1583 * this plane is linked to (when active, getting activated or getting
1584 * disabled).
1585 */
1526 struct drm_modeset_lock mutex; 1586 struct drm_modeset_lock mutex;
1527 1587
1528 struct drm_mode_object base; 1588 struct drm_mode_object base;
@@ -1543,6 +1603,9 @@ struct drm_plane {
1543 1603
1544 enum drm_plane_type type; 1604 enum drm_plane_type type;
1545 1605
1606 /* position inside the mode_config.list, can be used as a [] idx */
1607 unsigned index;
1608
1546 const struct drm_plane_helper_funcs *helper_private; 1609 const struct drm_plane_helper_funcs *helper_private;
1547 1610
1548 struct drm_plane_state *state; 1611 struct drm_plane_state *state;
@@ -1694,18 +1757,136 @@ struct drm_bridge {
1694}; 1757};
1695 1758
1696/** 1759/**
1760 * struct drm_crtc_commit - track modeset commits on a CRTC
1761 *
1762 * This structure is used to track pending modeset changes and atomic commit on
1763 * a per-CRTC basis. Since updating the list should never block this structure
1764 * is reference counted to allow waiters to safely wait on an event to complete,
1765 * without holding any locks.
1766 *
1767 * It has 3 different events in total to allow a fine-grained synchronization
1768 * between outstanding updates::
1769 *
1770 * atomic commit thread hardware
1771 *
1772 * write new state into hardware ----> ...
1773 * signal hw_done
1774 * switch to new state on next
1775 * ... v/hblank
1776 *
1777 * wait for buffers to show up ...
1778 *
1779 * ... send completion irq
1780 * irq handler signals flip_done
1781 * cleanup old buffers
1782 *
1783 * signal cleanup_done
1784 *
1785 * wait for flip_done <----
1786 * clean up atomic state
1787 *
1788 * The important bit to know is that cleanup_done is the terminal event, but the
1789 * ordering between flip_done and hw_done is entirely up to the specific driver
1790 * and modeset state change.
1791 *
1792 * For an implementation of how to use this look at
1793 * drm_atomic_helper_setup_commit() from the atomic helper library.
1794 */
1795struct drm_crtc_commit {
1796 /**
1797 * @crtc:
1798 *
1799 * DRM CRTC for this commit.
1800 */
1801 struct drm_crtc *crtc;
1802
1803 /**
1804 * @ref:
1805 *
1806 * Reference count for this structure. Needed to allow blocking on
1807 * completions without the risk of the completion disappearing
1808 * meanwhile.
1809 */
1810 struct kref ref;
1811
1812 /**
1813 * @flip_done:
1814 *
1815 * Will be signaled when the hardware has flipped to the new set of
1816 * buffers. Signals at the same time as when the drm event for this
1817 * commit is sent to userspace, or when an out-fence is signalled. Note
1818 * that for most hardware, in most cases this happens after @hw_done is
1819 * signalled.
1820 */
1821 struct completion flip_done;
1822
1823 /**
1824 * @hw_done:
1825 *
1826 * Will be signalled when all hw register changes for this commit have
1827 * been written out. Especially when disabling a pipe, this can be much
1828 * later than @flip_done, since that can already signal when the
1829 * screen goes black, whereas to fully shut down a pipe more register
1830 * I/O is required.
1831 *
1832 * Note that this does not need to include separately reference-counted
1833 * resources like backing storage buffer pinning, or runtime pm
1834 * management.
1835 */
1836 struct completion hw_done;
1837
1838 /**
1839 * @cleanup_done:
1840 *
1841 * Will be signalled after old buffers have been cleaned up by calling
1842 * drm_atomic_helper_cleanup_planes(). Since this can only happen after
1843 * a vblank wait has completed, it might be a bit later. This completion is
1844 * useful to throttle updates and avoid hardware updates getting ahead
1845 * of the buffer cleanup too much.
1846 */
1847 struct completion cleanup_done;
1848
1849 /**
1850 * @commit_entry:
1851 *
1852 * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
1853 */
1854 struct list_head commit_entry;
1855
1856 /**
1857 * @event:
1858 *
1859 * &drm_pending_vblank_event pointer to clean up private events.
1860 */
1861 struct drm_pending_vblank_event *event;
1862};
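
To make the reference counting concrete: a waiter must hold its own reference across any wait on these completions, since the commit can otherwise be freed while the waiter sleeps. A minimal sketch (release_crtc_commit() is a hypothetical kref release callback; the helper code in drm_atomic_helper.c follows the same pattern)::

    static void example_wait_for_flip(struct drm_crtc_commit *commit)
    {
            /* pin the commit so the completion cannot vanish while we sleep */
            kref_get(&commit->ref);

            wait_for_completion(&commit->flip_done);

            /* drop our reference; the hypothetical callback frees the commit */
            kref_put(&commit->ref, release_crtc_commit);
    }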
1863
1864struct __drm_planes_state {
1865 struct drm_plane *ptr;
1866 struct drm_plane_state *state;
1867};
1868
1869struct __drm_crtcs_state {
1870 struct drm_crtc *ptr;
1871 struct drm_crtc_state *state;
1872 struct drm_crtc_commit *commit;
1873};
1874
1875struct __drm_connnectors_state {
1876 struct drm_connector *ptr;
1877 struct drm_connector_state *state;
1878};
1879
1880/**
1697 * struct drm_atomic_state - the global state object for atomic updates 1881 * struct drm_atomic_state - the global state object for atomic updates
1698 * @dev: parent DRM device 1882 * @dev: parent DRM device
1699 * @allow_modeset: allow full modeset 1883 * @allow_modeset: allow full modeset
1700 * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics 1884 * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
1701 * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL. 1885 * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
1702 * @planes: pointer to array of plane pointers 1886 * @planes: pointer to array of structures with per-plane data
1703 * @plane_states: pointer to array of plane states pointers
1704 * @crtcs: pointer to array of CRTC pointers 1887 * @crtcs: pointer to array of CRTC pointers
1705 * @crtc_states: pointer to array of CRTC states pointers
1706 * @num_connector: size of the @connectors and @connector_states arrays 1888 * @num_connector: size of the @connectors and @connector_states arrays
1707 * @connectors: pointer to array of connector pointers 1889 * @connectors: pointer to array of structures with per-connector data
1708 * @connector_states: pointer to array of connector states pointers
1709 * @acquire_ctx: acquire context for this atomic modeset state update 1890 * @acquire_ctx: acquire context for this atomic modeset state update
1710 */ 1891 */
1711struct drm_atomic_state { 1892struct drm_atomic_state {
@@ -1713,15 +1894,20 @@ struct drm_atomic_state {
1713 bool allow_modeset : 1; 1894 bool allow_modeset : 1;
1714 bool legacy_cursor_update : 1; 1895 bool legacy_cursor_update : 1;
1715 bool legacy_set_config : 1; 1896 bool legacy_set_config : 1;
1716 struct drm_plane **planes; 1897 struct __drm_planes_state *planes;
1717 struct drm_plane_state **plane_states; 1898 struct __drm_crtcs_state *crtcs;
1718 struct drm_crtc **crtcs;
1719 struct drm_crtc_state **crtc_states;
1720 int num_connector; 1899 int num_connector;
1721 struct drm_connector **connectors; 1900 struct __drm_connnectors_state *connectors;
1722 struct drm_connector_state **connector_states;
1723 1901
1724 struct drm_modeset_acquire_ctx *acquire_ctx; 1902 struct drm_modeset_acquire_ctx *acquire_ctx;
1903
1904 /**
1905 * @commit_work:
1906 *
1907 * Work item which can be used by the driver or helpers to execute the
1908 * commit without blocking.
1909 */
1910 struct work_struct commit_work;
1725}; 1911};
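
With the parallel pointer arrays folded into per-object structures, walking the duplicated state becomes a single array scan whose index doubles as the object's index. A rough sketch, assuming the usual num_total_plane count in &drm_mode_config::

    int i;

    for (i = 0; i < dev->mode_config.num_total_plane; i++) {
            struct drm_plane *plane = state->planes[i].ptr;
            struct drm_plane_state *plane_state = state->planes[i].state;

            /* slots for planes untouched by this update stay NULL */
            if (!plane)
                    continue;

            /* inspect plane and plane_state here */
    }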
1726 1912
1727 1913
@@ -2022,8 +2208,6 @@ struct drm_mode_config_funcs {
2022 * @connection_mutex: ww mutex protecting connector state and routing 2208 * @connection_mutex: ww mutex protecting connector state and routing
2023 * @acquire_ctx: global implicit acquire context used by atomic drivers for 2209 * @acquire_ctx: global implicit acquire context used by atomic drivers for
2024 * legacy IOCTLs 2210 * legacy IOCTLs
2025 * @idr_mutex: mutex for KMS ID allocation and management
2026 * @crtc_idr: main KMS ID tracking object
2027 * @fb_lock: mutex to protect fb state and lists 2211 * @fb_lock: mutex to protect fb state and lists
2028 * @num_fb: number of fbs available 2212 * @num_fb: number of fbs available
2029 * @fb_list: list of framebuffers available 2213 * @fb_list: list of framebuffers available
@@ -2045,6 +2229,7 @@ struct drm_mode_config_funcs {
2045 * @fb_base: base address of the framebuffer 2229 * @fb_base: base address of the framebuffer
2046 * @poll_enabled: track polling support for this device 2230 * @poll_enabled: track polling support for this device
2047 * @poll_running: track polling status for this device 2231 * @poll_running: track polling status for this device
2232 * @delayed_event: track delayed poll uevent deliver for this device
2048 * @output_poll_work: delayed work for polling in process context 2233 * @output_poll_work: delayed work for polling in process context
2049 * @property_blob_list: list of all the blob property objects 2234 * @property_blob_list: list of all the blob property objects
2050 * @blob_lock: mutex for blob property allocation and management 2235 * @blob_lock: mutex for blob property allocation and management
@@ -2063,6 +2248,7 @@ struct drm_mode_config_funcs {
2063 * @async_page_flip: does this device support async flips on the primary plane? 2248 * @async_page_flip: does this device support async flips on the primary plane?
2064 * @cursor_width: hint to userspace for max cursor width 2249 * @cursor_width: hint to userspace for max cursor width
2065 * @cursor_height: hint to userspace for max cursor height 2250 * @cursor_height: hint to userspace for max cursor height
2251 * @helper_private: mid-layer private data
2066 * 2252 *
2067 * Core mode resource tracking structure. All CRTC, encoders, and connectors 2253 * Core mode resource tracking structure. All CRTC, encoders, and connectors
2068 * enumerated by the driver are added here, as are global properties. Some 2254 * enumerated by the driver are added here, as are global properties. Some
@@ -2072,10 +2258,30 @@ struct drm_mode_config {
2072 struct mutex mutex; /* protects configuration (mode lists etc.) */ 2258 struct mutex mutex; /* protects configuration (mode lists etc.) */
2073 struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */ 2259 struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
2074 struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ 2260 struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
2075 struct mutex idr_mutex; /* for IDR management */ 2261
2076 struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ 2262 /**
2077 struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ 2263 * @idr_mutex:
2078 /* this is limited to one for now */ 2264 *
2265 * Mutex for KMS ID allocation and management. Protects both @crtc_idr
2266 * and @tile_idr.
2267 */
2268 struct mutex idr_mutex;
2269
2270 /**
2271 * @crtc_idr:
2272 *
2273 * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
2274 * connector, modes - just makes life easier to have only one.
2275 */
2276 struct idr crtc_idr;
2277
2278 /**
2279 * @tile_idr:
2280 *
2281 * Use this idr for allocating new IDs for tiled sinks like those used in some
2282 * high-res DP MST screens.
2283 */
2284 struct idr tile_idr;
2079 2285
2080 struct mutex fb_lock; /* protects global and per-file fb lists */ 2286 struct mutex fb_lock; /* protects global and per-file fb lists */
2081 int num_fb; 2287 int num_fb;
@@ -2177,11 +2383,17 @@ struct drm_mode_config {
2177 /* whether async page flip is supported or not */ 2383 /* whether async page flip is supported or not */
2178 bool async_page_flip; 2384 bool async_page_flip;
2179 2385
2180 /* whether the driver supports fb modifiers */ 2386 /**
2387 * @allow_fb_modifiers:
2388 *
2389 * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
2390 */
2181 bool allow_fb_modifiers; 2391 bool allow_fb_modifiers;
2182 2392
2183 /* cursor size */ 2393 /* cursor size */
2184 uint32_t cursor_width, cursor_height; 2394 uint32_t cursor_width, cursor_height;
2395
2396 struct drm_mode_config_helper_funcs *helper_private;
2185}; 2397};
2186 2398
2187/** 2399/**
@@ -2230,7 +2442,18 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
2230 const struct drm_crtc_funcs *funcs, 2442 const struct drm_crtc_funcs *funcs,
2231 const char *name, ...); 2443 const char *name, ...);
2232extern void drm_crtc_cleanup(struct drm_crtc *crtc); 2444extern void drm_crtc_cleanup(struct drm_crtc *crtc);
2233extern unsigned int drm_crtc_index(struct drm_crtc *crtc); 2445
2446/**
2447 * drm_crtc_index - find the index of a registered CRTC
2448 * @crtc: CRTC to find index for
2449 *
2450 * Given a registered CRTC, return the index of that CRTC within a DRM
2451 * device's list of CRTCs.
2452 */
2453static inline unsigned int drm_crtc_index(struct drm_crtc *crtc)
2454{
2455 return crtc->index;
2456}
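
Because the index is assigned once at init time, the lookup is O(1) and, per the new @index comment above, usable directly as an array subscript; for example (dev_priv and its per-CRTC array are hypothetical driver state)::

    struct mydrv_crtc_state *s =
            &dev_priv->crtc_states[drm_crtc_index(crtc)];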
2234 2457
2235/** 2458/**
2236 * drm_crtc_mask - find the mask of a registered CRTC 2459 * drm_crtc_mask - find the mask of a registered CRTC
@@ -2284,7 +2507,18 @@ int drm_encoder_init(struct drm_device *dev,
2284 struct drm_encoder *encoder, 2507 struct drm_encoder *encoder,
2285 const struct drm_encoder_funcs *funcs, 2508 const struct drm_encoder_funcs *funcs,
2286 int encoder_type, const char *name, ...); 2509 int encoder_type, const char *name, ...);
2287extern unsigned int drm_encoder_index(struct drm_encoder *encoder); 2510
2511/**
2512 * drm_encoder_index - find the index of a registered encoder
2513 * @encoder: encoder to find index for
2514 *
2515 * Given a registered encoder, return the index of that encoder within a DRM
2516 * device's list of encoders.
2517 */
2518static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
2519{
2520 return encoder->index;
2521}
2288 2522
2289/** 2523/**
2290 * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 2524 * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -2315,7 +2549,18 @@ extern int drm_plane_init(struct drm_device *dev,
2315 const uint32_t *formats, unsigned int format_count, 2549 const uint32_t *formats, unsigned int format_count,
2316 bool is_primary); 2550 bool is_primary);
2317extern void drm_plane_cleanup(struct drm_plane *plane); 2551extern void drm_plane_cleanup(struct drm_plane *plane);
2318extern unsigned int drm_plane_index(struct drm_plane *plane); 2552
2553/**
2554 * drm_plane_index - find the index of a registered plane
2555 * @plane: plane to find index for
2556 *
2557 * Given a registered plane, return the index of that plane within a DRM
2558 * device's list of planes.
2559 */
2560static inline unsigned int drm_plane_index(struct drm_plane *plane)
2561{
2562 return plane->index;
2563}
2319extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); 2564extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
2320extern void drm_plane_force_disable(struct drm_plane *plane); 2565extern void drm_plane_force_disable(struct drm_plane *plane);
2321extern int drm_plane_check_pixel_format(const struct drm_plane *plane, 2566extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
@@ -2540,20 +2785,14 @@ extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
2540extern int drm_mode_atomic_ioctl(struct drm_device *dev, 2785extern int drm_mode_atomic_ioctl(struct drm_device *dev,
2541 void *data, struct drm_file *file_priv); 2786 void *data, struct drm_file *file_priv);
2542 2787
2543extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
2544 int *bpp);
2545extern int drm_format_num_planes(uint32_t format);
2546extern int drm_format_plane_cpp(uint32_t format, int plane);
2547extern int drm_format_horz_chroma_subsampling(uint32_t format);
2548extern int drm_format_vert_chroma_subsampling(uint32_t format);
2549extern int drm_format_plane_width(int width, uint32_t format, int plane);
2550extern int drm_format_plane_height(int height, uint32_t format, int plane);
2551extern const char *drm_get_format_name(uint32_t format);
2552extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, 2788extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
2553 unsigned int supported_rotations); 2789 unsigned int supported_rotations);
2554extern unsigned int drm_rotation_simplify(unsigned int rotation, 2790extern unsigned int drm_rotation_simplify(unsigned int rotation,
2555 unsigned int supported_rotations); 2791 unsigned int supported_rotations);
2556 2792extern void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
2793 uint degamma_lut_size,
2794 bool has_ctm,
2795 uint gamma_lut_size);
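
A driver advertises its color management capabilities once at CRTC init time; a hedged example call (the LUT sizes are made up)::

    /* 256-entry degamma LUT, a CTM matrix, 1024-entry gamma LUT */
    drm_crtc_enable_color_mgmt(crtc, 256, true, 1024);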
2557/* Helpers */ 2796/* Helpers */
2558 2797
2559static inline struct drm_plane *drm_plane_find(struct drm_device *dev, 2798static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 97fa894d4ee2..4b37afa2b73b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -48,9 +48,6 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
48 struct drm_display_mode *mode, 48 struct drm_display_mode *mode,
49 int x, int y, 49 int x, int y,
50 struct drm_framebuffer *old_fb); 50 struct drm_framebuffer *old_fb);
51extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
52 int degamma_lut_size,
53 int gamma_lut_size);
54extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); 51extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
55extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); 52extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
56 53
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 9d03f167007b..5a848e734422 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -622,6 +622,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
622#define DP_BRANCH_OUI_HEADER_SIZE 0xc 622#define DP_BRANCH_OUI_HEADER_SIZE 0xc
623#define DP_RECEIVER_CAP_SIZE 0xf 623#define DP_RECEIVER_CAP_SIZE 0xf
624#define EDP_PSR_RECEIVER_CAP_SIZE 2 624#define EDP_PSR_RECEIVER_CAP_SIZE 2
625#define EDP_DISPLAY_CTL_CAP_SIZE 3
625 626
626void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); 627void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
627void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); 628void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 5b4aa35026a3..db8d4780eaa2 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -212,17 +212,6 @@ struct drm_fb_helper {
212 * needs to be reprobed when fbdev is in control again. 212 * needs to be reprobed when fbdev is in control again.
213 */ 213 */
214 bool delayed_hotplug; 214 bool delayed_hotplug;
215
216 /**
217 * @atomic:
218 *
219 * Use atomic updates for restore_fbdev_mode(), etc. This defaults to
220 * true if driver has DRIVER_ATOMIC feature flag, but drivers can
221 * override it to true after drm_fb_helper_init() if they support atomic
222 * modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper
223 * does not require ASYNC commits).
224 */
225 bool atomic;
226}; 215};
227 216
228#ifdef CONFIG_DRM_FBDEV_EMULATION 217#ifdef CONFIG_DRM_FBDEV_EMULATION
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
new file mode 100644
index 000000000000..7f90a396cf2b
--- /dev/null
+++ b/include/drm/drm_fourcc.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22#ifndef __DRM_FOURCC_H__
23#define __DRM_FOURCC_H__
24
25#include <linux/types.h>
26#include <uapi/drm/drm_fourcc.h>
27
28void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
29int drm_format_num_planes(uint32_t format);
30int drm_format_plane_cpp(uint32_t format, int plane);
31int drm_format_horz_chroma_subsampling(uint32_t format);
32int drm_format_vert_chroma_subsampling(uint32_t format);
33int drm_format_plane_width(int width, uint32_t format, int plane);
34int drm_format_plane_height(int height, uint32_t format, int plane);
35const char *drm_get_format_name(uint32_t format);
36
37#endif /* __DRM_FOURCC_H__ */
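
Format-layout users now pull in this header directly instead of going through drm_crtc.h; a small sketch querying NV12::

    #include <drm/drm_fourcc.h>

    int nplanes = drm_format_num_planes(DRM_FORMAT_NV12);              /* 2 */
    int cpp0    = drm_format_plane_cpp(DRM_FORMAT_NV12, 0);            /* 1 byte */
    int hsub    = drm_format_horz_chroma_subsampling(DRM_FORMAT_NV12); /* 2 */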
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 7a9840f8b38e..72f5b15e0738 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -180,6 +180,8 @@ struct mipi_dsi_device {
180 unsigned long mode_flags; 180 unsigned long mode_flags;
181}; 181};
182 182
183#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
184
183static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) 185static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
184{ 186{
185 return container_of(dev, struct mipi_dsi_device, dev); 187 return container_of(dev, struct mipi_dsi_device, dev);
@@ -263,6 +265,7 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
263 u16 end); 265 u16 end);
264int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, 266int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
265 u16 end); 267 u16 end);
268int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param);
266int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi); 269int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
267int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, 270int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
268 enum mipi_dsi_dcs_tear_mode mode); 271 enum mipi_dsi_dcs_tear_mode mode);
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 625966a906f2..ff481770d76b 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -169,6 +169,8 @@ enum drm_mode_status {
169 * 169 *
170 * The horizontal and vertical timings are defined per the following diagram. 170 * The horizontal and vertical timings are defined per the following diagram.
171 * 171 *
172 * ::
173 *
172 * 174 *
173 * Active Front Sync Back 175 * Active Front Sync Back
174 * Region Porch Porch 176 * Region Porch Porch
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index d4619dc2eecb..b55f21857a98 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -736,6 +736,11 @@ struct drm_connector_helper_funcs {
736 * inspect dynamic configuration state should instead use 736 * inspect dynamic configuration state should instead use
737 * @atomic_best_encoder. 737 * @atomic_best_encoder.
738 * 738 *
739 * You can leave this function as NULL if the connector is only
740 * attached to a single encoder and you are using the atomic helpers.
741 * In this case, the core will call drm_atomic_helper_best_encoder()
742 * for you.
743 *
739 * RETURNS: 744 * RETURNS:
740 * 745 *
741 * Encoder that should be used for the given connector and connector 746 * Encoder that should be used for the given connector and connector
@@ -752,8 +757,9 @@ struct drm_connector_helper_funcs {
752 * need to select the best encoder depending upon the desired 757 * need to select the best encoder depending upon the desired
753 * configuration and can't select it statically. 758 * configuration and can't select it statically.
754 * 759 *
755 * This function is used by drm_atomic_helper_check_modeset() and either 760 * This function is used by drm_atomic_helper_check_modeset().
756 * this or @best_encoder is required. 761 * If it is not implemented, the core will fall back to @best_encoder
762 * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL).
757 * 763 *
758 * NOTE: 764 * NOTE:
759 * 765 *
@@ -925,4 +931,43 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
925 plane->helper_private = funcs; 931 plane->helper_private = funcs;
926} 932}
927 933
934/**
935 * struct drm_mode_config_helper_funcs - global modeset helper operations
936 *
937 * These helper functions are used by the atomic helpers.
938 */
939struct drm_mode_config_helper_funcs {
940 /**
941 * @atomic_commit_tail:
942 *
943 * This hook is used by the default atomic_commit() hook implemented in
944 * drm_atomic_helper_commit() together with the nonblocking commit
945 * helpers (see drm_atomic_helper_setup_commit() for a starting point)
946 * to implement blocking and nonblocking commits easily. It is not used
947 * by the atomic helpers beyond that.
948 *
949 * This hook should first commit the given atomic state to the hardware.
950 * But drivers can add more waiting calls at the start of their
951 * implementation, e.g. to wait for driver-internal requests for implicit
952 * syncing, before starting to commit the update to the hardware.
953 *
954 * After the atomic update is committed to the hardware this hook needs
955 * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
956 * to be executed by the hardware, for example using
957 * drm_atomic_helper_wait_for_vblanks(), and then clean up the old
958 * framebuffers using drm_atomic_helper_cleanup_planes().
959 *
960 * When disabling a CRTC this hook _must_ stall for the commit to
961 * complete. Vblank waits don't work on a disabled CRTC, hence the core
962 * can't take care of this. And it also can't rely on the vblank event,
963 * since that can be signalled already when the screen shows black,
964 * which can happen much earlier than the last hardware access needed to
965 * shut off the display pipeline completely.
966 *
967 * This hook is optional, the default implementation is
968 * drm_atomic_helper_commit_tail().
969 */
970 void (*atomic_commit_tail)(struct drm_atomic_state *state);
971};
972
928#endif 973#endif
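
For reference, a driver hook following the documented sequence would look roughly like the stock drm_atomic_helper_commit_tail() (sketch; helper signatures vary between kernel versions)::

    static void mydrv_commit_tail(struct drm_atomic_state *state)
    {
            struct drm_device *dev = state->dev;

            drm_atomic_helper_commit_modeset_disables(dev, state);
            drm_atomic_helper_commit_planes(dev, state, false);
            drm_atomic_helper_commit_modeset_enables(dev, state);

            /* unblocks the next commit queued on these CRTCs */
            drm_atomic_helper_commit_hw_done(state);

            drm_atomic_helper_wait_for_vblanks(dev, state);
            drm_atomic_helper_cleanup_planes(dev, state);
    }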
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
new file mode 100644
index 000000000000..269039722f91
--- /dev/null
+++ b/include/drm/drm_simple_kms_helper.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (C) 2016 Noralf Trønnes
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H
11#define __LINUX_DRM_SIMPLE_KMS_HELPER_H
12
13struct drm_simple_display_pipe;
14
15/**
16 * struct drm_simple_display_pipe_funcs - helper operations for a simple
17 * display pipeline
18 */
19struct drm_simple_display_pipe_funcs {
20 /**
21 * @enable:
22 *
23 * This function should be used to enable the pipeline.
24 * It is called when the underlying crtc is enabled.
25 * This hook is optional.
26 */
27 void (*enable)(struct drm_simple_display_pipe *pipe,
28 struct drm_crtc_state *crtc_state);
29 /**
30 * @disable:
31 *
32 * This function should be used to disable the pipeline.
33 * It is called when the underlying crtc is disabled.
34 * This hook is optional.
35 */
36 void (*disable)(struct drm_simple_display_pipe *pipe);
37
38 /**
39 * @check:
40 *
41 * This function is called in the check phase of an atomic update,
42 * specifically when the underlying plane is checked.
43 * The simple display pipeline helpers already check that the plane is
44 * not scaled, fills the entire visible area and is always enabled
45 * when the crtc is also enabled.
46 * This hook is optional.
47 *
48 * RETURNS:
49 *
50 * 0 on success, -EINVAL if the state or the transition can't be
51 * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
52 * attempt to obtain another state object ran into a &drm_modeset_lock
53 * deadlock.
54 */
55 int (*check)(struct drm_simple_display_pipe *pipe,
56 struct drm_plane_state *plane_state,
57 struct drm_crtc_state *crtc_state);
58 /**
59 * @update:
60 *
61 * This function is called when the underlying plane state is updated.
62 * This hook is optional.
63 */
64 void (*update)(struct drm_simple_display_pipe *pipe,
65 struct drm_plane_state *plane_state);
66};
67
68/**
69 * struct drm_simple_display_pipe - simple display pipeline
70 * @crtc: CRTC control structure
71 * @plane: Plane control structure
72 * @encoder: Encoder control structure
73 * @connector: Connector control structure
74 * @funcs: Pipeline control functions (optional)
75 *
76 * Simple display pipeline with plane, crtc and encoder collapsed into one
77 * entity. It should be initialized by calling drm_simple_display_pipe_init().
78 */
79struct drm_simple_display_pipe {
80 struct drm_crtc crtc;
81 struct drm_plane plane;
82 struct drm_encoder encoder;
83 struct drm_connector *connector;
84
85 const struct drm_simple_display_pipe_funcs *funcs;
86};
87
88int drm_simple_display_pipe_init(struct drm_device *dev,
89 struct drm_simple_display_pipe *pipe,
90 const struct drm_simple_display_pipe_funcs *funcs,
91 const uint32_t *formats, unsigned int format_count,
92 struct drm_connector *connector);
93
94#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
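
Typical usage collapses the plane/crtc/encoder init dance into one call; a sketch with hypothetical mydrv_* names::

    static const struct drm_simple_display_pipe_funcs mydrv_pipe_funcs = {
            .enable = mydrv_enable,
            .update = mydrv_update,
    };

    static const uint32_t mydrv_formats[] = { DRM_FORMAT_XRGB8888 };

    ret = drm_simple_display_pipe_init(drm, &mydrv->pipe, &mydrv_pipe_funcs,
                                       mydrv_formats,
                                       ARRAY_SIZE(mydrv_formats),
                                       &mydrv->connector);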
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 595f85c392ac..b1755f8db36b 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -92,4 +92,7 @@ extern bool i915_gpu_turbo_disable(void);
92#define I845_TSEG_SIZE_512K (2 << 1) 92#define I845_TSEG_SIZE_512K (2 << 1)
93#define I845_TSEG_SIZE_1M (3 << 1) 93#define I845_TSEG_SIZE_1M (3 << 1)
94 94
95#define INTEL_BSM 0x5c
96#define INTEL_BSM_MASK (0xFFFF << 20)
97
95#endif /* _I915_DRM_H_ */ 98#endif /* _I915_DRM_H_ */
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 19b14862d3e0..1b3b6e155392 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -279,6 +279,11 @@ struct ceph_osd_client {
279 struct workqueue_struct *notify_wq; 279 struct workqueue_struct *notify_wq;
280}; 280};
281 281
282static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
283{
284 return osdc->osdmap->flags & flag;
285}
286
282extern int ceph_osdc_setup(void); 287extern int ceph_osdc_setup(void);
283extern void ceph_osdc_cleanup(void); 288extern void ceph_osdc_cleanup(void);
284 289
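
Callers correspondingly switch from passing the osdmap to passing the client; sketch::

    /* was: ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) */
    if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD))
            return;        /* hypothetical caller reaction */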
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index ddc426b22d81..9ccf4dbe55f8 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -189,11 +189,6 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
189 return !ceph_osd_is_up(map, osd); 189 return !ceph_osd_is_up(map, osd);
190} 190}
191 191
192static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
193{
194 return map && (map->flags & flag);
195}
196
197extern char *ceph_osdmap_state_str(char *str, int len, int state); 192extern char *ceph_osdmap_state_str(char *str, int len, int state);
198extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); 193extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
199 194
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 5871f292b596..277ab9af9ac2 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,13 +15,12 @@
15 15
16#include <linux/errno.h> 16#include <linux/errno.h>
17 17
18struct pts_fs_info;
19
20#ifdef CONFIG_UNIX98_PTYS 18#ifdef CONFIG_UNIX98_PTYS
21 19
22/* Look up a pts fs info and get a ref to it */ 20struct pts_fs_info;
23struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); 21
24void devpts_put_ref(struct pts_fs_info *); 22struct pts_fs_info *devpts_acquire(struct file *);
23void devpts_release(struct pts_fs_info *);
25 24
26int devpts_new_index(struct pts_fs_info *); 25int devpts_new_index(struct pts_fs_info *);
27void devpts_kill_index(struct pts_fs_info *, int); 26void devpts_kill_index(struct pts_fs_info *, int);
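
The renamed pair keeps the old acquire/release discipline; a hedged sketch of a caller allocating a pty index (assuming the usual ERR_PTR convention on failure)::

    struct pts_fs_info *fsi;
    int index;

    fsi = devpts_acquire(filp);
    if (IS_ERR(fsi))
            return PTR_ERR(fsi);

    index = devpts_new_index(fsi);
    /* ... use the pty ... */
    devpts_kill_index(fsi, index);
    devpts_release(fsi);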
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3fe90d494edb..4551c6f2a6c4 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -112,19 +112,24 @@ struct dma_buf_ops {
112 * @file: file pointer used for sharing buffers across, and for refcounting. 112 * @file: file pointer used for sharing buffers across, and for refcounting.
113 * @attachments: list of dma_buf_attachment that denotes all devices attached. 113 * @attachments: list of dma_buf_attachment that denotes all devices attached.
114 * @ops: dma_buf_ops associated with this buffer object. 114 * @ops: dma_buf_ops associated with this buffer object.
115 * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
116 * @vmapping_counter: used internally to refcnt the vmaps
117 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
115 * @exp_name: name of the exporter; useful for debugging. 118 * @exp_name: name of the exporter; useful for debugging.
116 * @owner: pointer to exporter module; used for refcounting when exporter is a 119 * @owner: pointer to exporter module; used for refcounting when exporter is a
117 * kernel module. 120 * kernel module.
118 * @list_node: node for dma_buf accounting and debugging. 121 * @list_node: node for dma_buf accounting and debugging.
119 * @priv: exporter specific private data for this buffer object. 122 * @priv: exporter specific private data for this buffer object.
120 * @resv: reservation object linked to this dma-buf 123 * @resv: reservation object linked to this dma-buf
124 * @poll: for userspace poll support
125 * @cb_excl: for userspace poll support
126 * @cb_shared: for userspace poll support
121 */ 127 */
122struct dma_buf { 128struct dma_buf {
123 size_t size; 129 size_t size;
124 struct file *file; 130 struct file *file;
125 struct list_head attachments; 131 struct list_head attachments;
126 const struct dma_buf_ops *ops; 132 const struct dma_buf_ops *ops;
127 /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
128 struct mutex lock; 133 struct mutex lock;
129 unsigned vmapping_counter; 134 unsigned vmapping_counter;
130 void *vmap_ptr; 135 void *vmap_ptr;
@@ -188,9 +193,11 @@ struct dma_buf_export_info {
188 193
189/** 194/**
190 * helper macro for exporters; zeros and fills in most common values 195 * helper macro for exporters; zeros and fills in most common values
196 *
197 * @name: export-info name
191 */ 198 */
192#define DEFINE_DMA_BUF_EXPORT_INFO(a) \ 199#define DEFINE_DMA_BUF_EXPORT_INFO(name) \
193 struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \ 200 struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
194 .owner = THIS_MODULE } 201 .owner = THIS_MODULE }
195 202
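
With the macro parameter named something self-explanatory, typical exporter usage reads (the ops table and backing object are hypothetical)::

    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

    exp_info.ops = &mydrv_dmabuf_ops;
    exp_info.size = obj->size;
    exp_info.priv = obj;

    dmabuf = dma_buf_export(&exp_info);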
196/** 203/**
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
new file mode 100644
index 000000000000..86baaa45567c
--- /dev/null
+++ b/include/linux/fence-array.h
@@ -0,0 +1,73 @@
1/*
2 * fence-array: aggregates fences to be waited on together
3 *
4 * Copyright (C) 2016 Collabora Ltd
5 * Copyright (C) 2016 Advanced Micro Devices, Inc.
6 * Authors:
7 * Gustavo Padovan <gustavo@padovan.org>
8 * Christian König <christian.koenig@amd.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 */
19
20#ifndef __LINUX_FENCE_ARRAY_H
21#define __LINUX_FENCE_ARRAY_H
22
23#include <linux/fence.h>
24
25/**
26 * struct fence_array_cb - callback helper for fence array
27 * @cb: fence callback structure for signaling
28 * @array: reference to the parent fence array object
29 */
30struct fence_array_cb {
31 struct fence_cb cb;
32 struct fence_array *array;
33};
34
35/**
36 * struct fence_array - fence to represent an array of fences
37 * @base: fence base class
38 * @lock: spinlock for fence handling
39 * @num_fences: number of fences in the array
40 * @num_pending: fences in the array still pending
41 * @fences: array of the fences
42 */
43struct fence_array {
44 struct fence base;
45
46 spinlock_t lock;
47 unsigned num_fences;
48 atomic_t num_pending;
49 struct fence **fences;
50};
51
52extern const struct fence_ops fence_array_ops;
53
54/**
55 * to_fence_array - cast a fence to a fence_array
56 * @fence: fence to cast to a fence_array
57 *
58 * Returns NULL if the fence is not a fence_array,
59 * or the fence_array otherwise.
60 */
61static inline struct fence_array *to_fence_array(struct fence *fence)
62{
63 if (fence->ops != &fence_array_ops)
64 return NULL;
65
66 return container_of(fence, struct fence_array, base);
67}
68
69struct fence_array *fence_array_create(int num_fences, struct fence **fences,
70 u64 context, unsigned seqno,
71 bool signal_on_any);
72
73#endif /* __LINUX_FENCE_ARRAY_H */
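
Putting the pieces together, a sketch that aggregates two fences and blocks until both have signalled (signal_on_any == false). Note that fence_array_create() takes ownership of the passed array, so it must be heap-allocated (error handling elided)::

    struct fence **fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
    struct fence_array *array;

    fences[0] = a;
    fences[1] = b;
    array = fence_array_create(2, fences, fence_context_alloc(1), 1, false);
    if (!array)
            return -ENOMEM;

    fence_wait(&array->base, false);
    fence_put(&array->base);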
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2b17698b60b8..44d945e96473 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -49,6 +49,8 @@ struct fence_cb;
49 * @timestamp: Timestamp when the fence was signaled. 49 * @timestamp: Timestamp when the fence was signaled.
50 * @status: Optional, only valid if < 0, must be set before calling 50 * @status: Optional, only valid if < 0, must be set before calling
51 * fence_signal, indicates that the fence has completed with an error. 51 * fence_signal, indicates that the fence has completed with an error.
52 * @child_list: list of children fences
53 * @active_list: list of active fences
52 * 54 *
53 * the flags member must be manipulated and read using the appropriate 55 * the flags member must be manipulated and read using the appropriate
54 * atomic ops (bit_*), so taking the spinlock will not be needed most 56 * atomic ops (bit_*), so taking the spinlock will not be needed most
@@ -75,7 +77,8 @@ struct fence {
75 struct rcu_head rcu; 77 struct rcu_head rcu;
76 struct list_head cb_list; 78 struct list_head cb_list;
77 spinlock_t *lock; 79 spinlock_t *lock;
78 unsigned context, seqno; 80 u64 context;
81 unsigned seqno;
79 unsigned long flags; 82 unsigned long flags;
80 ktime_t timestamp; 83 ktime_t timestamp;
81 int status; 84 int status;
@@ -178,7 +181,7 @@ struct fence_ops {
178}; 181};
179 182
180void fence_init(struct fence *fence, const struct fence_ops *ops, 183void fence_init(struct fence *fence, const struct fence_ops *ops,
181 spinlock_t *lock, unsigned context, unsigned seqno); 184 spinlock_t *lock, u64 context, unsigned seqno);
182 185
183void fence_release(struct kref *kref); 186void fence_release(struct kref *kref);
184void fence_free(struct fence *fence); 187void fence_free(struct fence *fence);
@@ -352,27 +355,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr)
352 return ret < 0 ? ret : 0; 355 return ret < 0 ? ret : 0;
353} 356}
354 357
355unsigned fence_context_alloc(unsigned num); 358u64 fence_context_alloc(unsigned num);
356 359
357#define FENCE_TRACE(f, fmt, args...) \ 360#define FENCE_TRACE(f, fmt, args...) \
358 do { \ 361 do { \
359 struct fence *__ff = (f); \ 362 struct fence *__ff = (f); \
360 if (config_enabled(CONFIG_FENCE_TRACE)) \ 363 if (config_enabled(CONFIG_FENCE_TRACE)) \
361 pr_info("f %u#%u: " fmt, \ 364 pr_info("f %llu#%u: " fmt, \
362 __ff->context, __ff->seqno, ##args); \ 365 __ff->context, __ff->seqno, ##args); \
363 } while (0) 366 } while (0)
364 367
365#define FENCE_WARN(f, fmt, args...) \ 368#define FENCE_WARN(f, fmt, args...) \
366 do { \ 369 do { \
367 struct fence *__ff = (f); \ 370 struct fence *__ff = (f); \
368 pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ 371 pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
369 ##args); \ 372 ##args); \
370 } while (0) 373 } while (0)
371 374
372#define FENCE_ERR(f, fmt, args...) \ 375#define FENCE_ERR(f, fmt, args...) \
373 do { \ 376 do { \
374 struct fence *__ff = (f); \ 377 struct fence *__ff = (f); \
375 pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ 378 pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
376 ##args); \ 379 ##args); \
377 } while (0) 380 } while (0)
378 381
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 604e1526cd00..13ba552e6c09 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -241,7 +241,7 @@ struct fscache_cache_ops {
241 241
242 /* check the consistency between the backing cache and the FS-Cache 242 /* check the consistency between the backing cache and the FS-Cache
243 * cookie */ 243 * cookie */
244 bool (*check_consistency)(struct fscache_operation *op); 244 int (*check_consistency)(struct fscache_operation *op);
245 245
246 /* store the updated auxiliary data on an object */ 246 /* store the updated auxiliary data on an object */
247 void (*update_object)(struct fscache_object *object); 247 void (*update_object)(struct fscache_object *object);
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index e399029b68c5..645ad06b5d52 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
100} 100}
101 101
102static inline void __iomem * 102static inline void __iomem *
103io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 103io_mapping_map_wc(struct io_mapping *mapping,
104 unsigned long offset,
105 unsigned long size)
104{ 106{
105 resource_size_t phys_addr; 107 resource_size_t phys_addr;
106 108
107 BUG_ON(offset >= mapping->size); 109 BUG_ON(offset >= mapping->size);
108 phys_addr = mapping->base + offset; 110 phys_addr = mapping->base + offset;
109 111
110 return ioremap_wc(phys_addr, PAGE_SIZE); 112 return ioremap_wc(phys_addr, size);
111} 113}
112 114
113static inline void 115static inline void
@@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
155 157
156/* Non-atomic map/unmap */ 158/* Non-atomic map/unmap */
157static inline void __iomem * 159static inline void __iomem *
158io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 160io_mapping_map_wc(struct io_mapping *mapping,
161 unsigned long offset,
162 unsigned long size)
159{ 163{
160 return ((char __force __iomem *) mapping) + offset; 164 return ((char __force __iomem *) mapping) + offset;
161} 165}
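
Callers now state how much they intend to map instead of getting an implicit single page; sketch::

    void __iomem *vaddr;

    vaddr = io_mapping_map_wc(mapping, offset, PAGE_SIZE);
    /* ... MMIO access ... */
    io_mapping_unmap(vaddr);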
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index bfbd707de390..dc493e0f0ff7 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -305,12 +305,12 @@
305#define ICC_SGI1R_AFFINITY_1_SHIFT 16 305#define ICC_SGI1R_AFFINITY_1_SHIFT 16
306#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) 306#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
307#define ICC_SGI1R_SGI_ID_SHIFT 24 307#define ICC_SGI1R_SGI_ID_SHIFT 24
308#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) 308#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
309#define ICC_SGI1R_AFFINITY_2_SHIFT 32 309#define ICC_SGI1R_AFFINITY_2_SHIFT 32
310#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) 310#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
311#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 311#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
312#define ICC_SGI1R_AFFINITY_3_SHIFT 48 312#define ICC_SGI1R_AFFINITY_3_SHIFT 48
313#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) 313#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
314 314
315#include <asm/arch_gicv3.h> 315#include <asm/arch_gicv3.h>
316 316
diff --git a/include/linux/namei.h b/include/linux/namei.h
index ec5ec2818a28..d3d0398f2a1b 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
45#define LOOKUP_ROOT 0x2000 45#define LOOKUP_ROOT 0x2000
46#define LOOKUP_EMPTY 0x4000 46#define LOOKUP_EMPTY 0x4000
47 47
48extern int path_pts(struct path *path);
49
48extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); 50extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
49 51
50static inline int user_path_at(int dfd, const char __user *name, unsigned flags, 52static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa92c5b..fec40271339f 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
46 46
47static inline bool page_is_young(struct page *page) 47static inline bool page_is_young(struct page *page)
48{ 48{
49 return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); 49 struct page_ext *page_ext = lookup_page_ext(page);
50
51 if (unlikely(!page_ext))
52 return false;
53
54 return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
50} 55}
51 56
52static inline void set_page_young(struct page *page) 57static inline void set_page_young(struct page *page)
53{ 58{
54 set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); 59 struct page_ext *page_ext = lookup_page_ext(page);
60
61 if (unlikely(!page_ext))
62 return;
63
64 set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
55} 65}
56 66
57static inline bool test_and_clear_page_young(struct page *page) 67static inline bool test_and_clear_page_young(struct page *page)
58{ 68{
59 return test_and_clear_bit(PAGE_EXT_YOUNG, 69 struct page_ext *page_ext = lookup_page_ext(page);
60 &lookup_page_ext(page)->flags); 70
71 if (unlikely(!page_ext))
72 return false;
73
74 return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
61} 75}
62 76
63static inline bool page_is_idle(struct page *page) 77static inline bool page_is_idle(struct page *page)
64{ 78{
65 return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 79 struct page_ext *page_ext = lookup_page_ext(page);
80
81 if (unlikely(!page_ext))
82 return false;
83
84 return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
66} 85}
67 86
68static inline void set_page_idle(struct page *page) 87static inline void set_page_idle(struct page *page)
69{ 88{
70 set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 89 struct page_ext *page_ext = lookup_page_ext(page);
90
91 if (unlikely(!page_ext))
92 return;
93
94 set_bit(PAGE_EXT_IDLE, &page_ext->flags);
71} 95}
72 96
73static inline void clear_page_idle(struct page *page) 97static inline void clear_page_idle(struct page *page)
74{ 98{
75 clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); 99 struct page_ext *page_ext = lookup_page_ext(page);
100
101 if (unlikely(!page_ext))
102 return;
103
104 clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
76} 105}
77#endif /* CONFIG_64BIT */ 106#endif /* CONFIG_64BIT */
78 107
diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h
new file mode 100644
index 000000000000..679177929045
--- /dev/null
+++ b/include/linux/platform_data/omapdss.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2016 Texas Instruments, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __OMAPDSS_PDATA_H
11#define __OMAPDSS_PDATA_H
12
13enum omapdss_version {
14 OMAPDSS_VER_UNKNOWN = 0,
15 OMAPDSS_VER_OMAP24xx,
16 OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */
17 OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */
18 OMAPDSS_VER_OMAP3630,
19 OMAPDSS_VER_AM35xx,
20 OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */
21 OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */
22 OMAPDSS_VER_OMAP4, /* All other OMAP4s */
23 OMAPDSS_VER_OMAP5,
24 OMAPDSS_VER_AM43xx,
25 OMAPDSS_VER_DRA7xx,
26};
27
28/* Board specific data */
29struct omap_dss_board_info {
30 const char *default_display_name;
31 int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask);
32 void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask);
33 int (*set_min_bus_tput)(struct device *dev, unsigned long r);
34 enum omapdss_version version;
35};
36
37#endif /* __OMAPDSS_PDATA_H */
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 49d057655d62..b0f305e77b7f 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class;
49extern struct lock_class_key reservation_seqcount_class; 49extern struct lock_class_key reservation_seqcount_class;
50extern const char reservation_seqcount_string[]; 50extern const char reservation_seqcount_string[];
51 51
52/**
53 * struct reservation_object_list - a list of shared fences
54 * @rcu: for internal use
55 * @shared_count: number of fences in the shared table
56 * @shared_max: for growing shared fence table
57 * @shared: shared fence table
58 */
52struct reservation_object_list { 59struct reservation_object_list {
53 struct rcu_head rcu; 60 struct rcu_head rcu;
54 u32 shared_count, shared_max; 61 u32 shared_count, shared_max;
55 struct fence __rcu *shared[]; 62 struct fence __rcu *shared[];
56}; 63};
57 64
65/**
66 * struct reservation_object - a reservation object manages fences for a buffer
67 * @lock: update side lock
68 * @seq: sequence count for managing RCU read-side synchronization
69 * @fence_excl: the exclusive fence, if there is one currently
70 * @fence: list of current shared fences
71 * @staged: staged copy of shared fences for RCU updates
72 */
58struct reservation_object { 73struct reservation_object {
59 struct ww_mutex lock; 74 struct ww_mutex lock;
60 seqcount_t seq; 75 seqcount_t seq;
@@ -68,6 +83,10 @@ struct reservation_object {
68#define reservation_object_assert_held(obj) \ 83#define reservation_object_assert_held(obj) \
69 lockdep_assert_held(&(obj)->lock.base) 84 lockdep_assert_held(&(obj)->lock.base)
70 85
86/**
87 * reservation_object_init - initialize a reservation object
88 * @obj: the reservation object
89 */
71static inline void 90static inline void
72reservation_object_init(struct reservation_object *obj) 91reservation_object_init(struct reservation_object *obj)
73{ 92{
@@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj)
79 obj->staged = NULL; 98 obj->staged = NULL;
80} 99}
81 100
101/**
102 * reservation_object_fini - destroys a reservation object
103 * @obj: the reservation object
104 */
82static inline void 105static inline void
83reservation_object_fini(struct reservation_object *obj) 106reservation_object_fini(struct reservation_object *obj)
84{ 107{
@@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj)
106 ww_mutex_destroy(&obj->lock); 129 ww_mutex_destroy(&obj->lock);
107} 130}
108 131
132/**
133 * reservation_object_get_list - get the reservation object's
134 * shared fence list, with update-side lock held
135 * @obj: the reservation object
136 *
137 * Returns the shared fence list. Does NOT take references to
138 * the fences. The obj->lock must be held.
139 */
109static inline struct reservation_object_list * 140static inline struct reservation_object_list *
110reservation_object_get_list(struct reservation_object *obj) 141reservation_object_get_list(struct reservation_object *obj)
111{ 142{
@@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj)
113 reservation_object_held(obj)); 144 reservation_object_held(obj));
114} 145}
115 146
147/**
148 * reservation_object_get_excl - get the reservation object's
149 * exclusive fence, with update-side lock held
150 * @obj: the reservation object
151 *
152 * Returns the exclusive fence (if any). Does NOT take a
153 * reference. The obj->lock must be held.
154 *
155 * RETURNS
156 * The exclusive fence or NULL
157 */
116static inline struct fence * 158static inline struct fence *
117reservation_object_get_excl(struct reservation_object *obj) 159reservation_object_get_excl(struct reservation_object *obj)
118{ 160{
@@ -120,6 +162,17 @@ reservation_object_get_excl(struct reservation_object *obj)
120 reservation_object_held(obj)); 162 reservation_object_held(obj));
121} 163}
122 164
165/**
166 * reservation_object_get_excl_rcu - get the reservation object's
167 * exclusive fence, without lock held.
168 * @obj: the reservation object
169 *
170 * If there is an exclusive fence, this atomically increments its
171 * reference count and returns it.
172 *
173 * RETURNS
174 * The exclusive fence or NULL if none
175 */
123static inline struct fence * 176static inline struct fence *
124reservation_object_get_excl_rcu(struct reservation_object *obj) 177reservation_object_get_excl_rcu(struct reservation_object *obj)
125{ 178{
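
A sketch of the lockless accessor in use, pairing the acquired reference with fence_put()::

    struct fence *excl = reservation_object_get_excl_rcu(resv);

    if (excl) {
            fence_wait(excl, false);
            fence_put(excl);
    }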
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index dacb5e711994..de1f64318fc4 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -765,6 +765,8 @@ struct sctp_info {
765 __u8 sctpi_s_disable_fragments; 765 __u8 sctpi_s_disable_fragments;
766 __u8 sctpi_s_v4mapped; 766 __u8 sctpi_s_v4mapped;
767 __u8 sctpi_s_frag_interleave; 767 __u8 sctpi_s_frag_interleave;
768 __u32 sctpi_s_type;
769 __u32 __reserved3;
768}; 770};
769 771
770struct sctp_infox { 772struct sctp_infox {
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 37dbacf84849..816b7543f81b 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv,
21 struct timespec64 ts64; 21 struct timespec64 ts64;
22 22
23 if (!tv) 23 if (!tv)
24 return do_sys_settimeofday64(NULL, tz);
25
26 if (!timespec_valid(tv))
24 return -EINVAL; 27 return -EINVAL;
25 28
26 ts64 = timespec_to_timespec64(*tv); 29 ts64 = timespec_to_timespec64(*tv);
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index b39a5f3153bd..960bedbdec87 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -165,6 +165,7 @@ int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
165 165
166int vga_switcheroo_process_delayed_switch(void); 166int vga_switcheroo_process_delayed_switch(void);
167 167
168bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
168enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); 169enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
169 170
170void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); 171void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@@ -188,6 +189,7 @@ static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(v
188static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; } 189static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
189static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; } 190static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
190static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } 191static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
192static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
191static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } 193static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
192 194
193static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} 195static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
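
Client drivers call the new predicate early in probe and bail out with -EPROBE_DEFER; sketch (mydrv_* is hypothetical)::

    static int mydrv_pci_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
    {
            if (vga_switcheroo_client_probe_defer(pdev))
                    return -EPROBE_DEFER;

            /* ... normal probe path ... */
            return 0;
    }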
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d325c81332e3..43a5a0e4524c 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -63,6 +63,8 @@ struct ip6_tnl_encap_ops {
63 u8 *protocol, struct flowi6 *fl6); 63 u8 *protocol, struct flowi6 *fl6);
64}; 64};
65 65
66#ifdef CONFIG_INET
67
66extern const struct ip6_tnl_encap_ops __rcu * 68extern const struct ip6_tnl_encap_ops __rcu *
67 ip6tun_encaps[MAX_IPTUN_ENCAP_OPS]; 69 ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
68 70
@@ -138,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev);
138int ip6_tnl_get_iflink(const struct net_device *dev); 140int ip6_tnl_get_iflink(const struct net_device *dev);
139int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); 141int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
140 142
141#ifdef CONFIG_INET
142static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, 143static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
143 struct net_device *dev) 144 struct net_device *dev)
144{ 145{
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 401038d2f9b8..fea53f4d92ca 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
61} 61}
62 62
63struct qdisc_watchdog { 63struct qdisc_watchdog {
64 u64 last_expires;
64 struct hrtimer timer; 65 struct hrtimer timer;
65 struct Qdisc *qdisc; 66 struct Qdisc *qdisc;
66}; 67};
diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h
index afdb416898e0..1df2ff61a4dd 100644
--- a/include/sound/omap-hdmi-audio.h
+++ b/include/sound/omap-hdmi-audio.h
@@ -16,11 +16,16 @@
16 * 16 *
17 */ 17 */
18 18
19#include <video/omapdss.h>
20
21#ifndef __OMAP_HDMI_AUDIO_H__ 19#ifndef __OMAP_HDMI_AUDIO_H__
22#define __OMAP_HDMI_AUDIO_H__ 20#define __OMAP_HDMI_AUDIO_H__
23 21
22#include <linux/platform_data/omapdss.h>
23
24struct omap_dss_audio {
25 struct snd_aes_iec958 *iec;
26 struct snd_cea_861_aud_if *cea;
27};
28
24struct omap_hdmi_audio_ops { 29struct omap_hdmi_audio_ops {
25 int (*audio_startup)(struct device *dev, 30 int (*audio_startup)(struct device *dev,
26 void (*abort_cb)(struct device *dev)); 31 void (*abort_cb)(struct device *dev));
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 9222db8ccccc..5f030b46cff4 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices {
1353 ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, 1353 ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
1354 ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, 1354 ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
1355 ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, 1355 ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
1356 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31,
1357 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32,
1358 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33,
1359 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
1360 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35,
1361 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36,
1362 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
1363 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
1364 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
1356 1365
1357 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit 1366 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
1358 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* 1367 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices {
 	 */
 
 	__ETHTOOL_LINK_MODE_LAST
-	  = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+	  = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
 };
 
 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)	\
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index eba5914ba5d1..f4297c8a42fe 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -145,6 +145,8 @@ enum {
 	TCA_POLICE_PEAKRATE,
 	TCA_POLICE_AVRATE,
 	TCA_POLICE_RESULT,
+	TCA_POLICE_TM,
+	TCA_POLICE_PAD,
 	__TCA_POLICE_MAX
 #define TCA_POLICE_RESULT TCA_POLICE_RESULT
 };
@@ -173,7 +175,7 @@ enum {
 	TCA_U32_DIVISOR,
 	TCA_U32_SEL,
 	TCA_U32_POLICE,
-	TCA_U32_ACT,
+	TCA_U32_ACT,
 	TCA_U32_INDEV,
 	TCA_U32_PCNT,
 	TCA_U32_MARK,
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
index 56830d1dc762..e7003ee6e063 100644
--- a/include/video/omap-panel-data.h
+++ b/include/video/omap-panel-data.h
@@ -27,59 +27,18 @@
 #ifndef __OMAP_PANEL_DATA_H
 #define __OMAP_PANEL_DATA_H
 
-#include <video/omapdss.h>
 #include <video/display_timing.h>
 
-struct omap_dss_device;
-
-/**
- * encoder_tfp410 platform data
- * @name: name for this display entity
- * @power_down_gpio: gpio number for PD pin (or -1 if not available)
- * @data_lines: number of DPI datalines
- */
-struct encoder_tfp410_platform_data {
-	const char *name;
-	const char *source;
-	int power_down_gpio;
-	int data_lines;
-};
-
-
-/**
- * connector_dvi platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @i2c_bus_num: i2c bus number to be used for reading EDID
- */
-struct connector_dvi_platform_data {
-	const char *name;
-	const char *source;
-	int i2c_bus_num;
-};
-
-/**
- * connector_hdmi platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- */
-struct connector_hdmi_platform_data {
-	const char *name;
-	const char *source;
-};
-
 /**
  * connector_atv platform data
  * @name: name for this display entity
  * @source: name of the display entity used as a video source
- * @connector_type: composite/svideo
  * @invert_polarity: invert signal polarity
  */
 struct connector_atv_platform_data {
 	const char *name;
 	const char *source;
 
-	enum omap_dss_venc_type connector_type;
 	bool invert_polarity;
 };
 
@@ -105,33 +64,6 @@ struct panel_dpi_platform_data {
 };
 
 /**
- * panel_dsicm platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @reset_gpio: gpio to reset the panel (or -1)
- * @use_ext_te: use external TE GPIO
- * @ext_te_gpio: external TE GPIO
- * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
- * @use_dsi_backlight: true if panel uses DSI command to control backlight
- * @pin_config: DSI pin configuration
- */
-struct panel_dsicm_platform_data {
-	const char *name;
-	const char *source;
-
-	int reset_gpio;
-
-	bool use_ext_te;
-	int ext_te_gpio;
-
-	unsigned ulps_timeout;
-
-	bool use_dsi_backlight;
-
-	struct omap_dsi_pin_config pin_config;
-};
-
-/**
  * panel_acx565akm platform data
  * @name: name for this display entity
  * @source: name of the display entity used as a video source
@@ -147,93 +79,4 @@ struct panel_acx565akm_platform_data {
 	int datapairs;
 };
 
-/**
- * panel_lb035q02 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @backlight_gpio: gpio to enable/disable the backlight (or -1)
- * @enable_gpio: gpio to enable/disable the panel (or -1)
- */
-struct panel_lb035q02_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-
-	int backlight_gpio;
-	int enable_gpio;
-};
-
-/**
- * panel_sharp_ls037v7dw01 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @resb_gpio: reset signal GPIO
- * @ini_gpio: power on control GPIO
- * @mo_gpio: selection for resolution(VGA/QVGA) GPIO
- * @lr_gpio: selection for horizontal scanning direction GPIO
- * @ud_gpio: selection for vertical scanning direction GPIO
- */
-struct panel_sharp_ls037v7dw01_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-
-	int resb_gpio;
-	int ini_gpio;
-	int mo_gpio;
-	int lr_gpio;
-	int ud_gpio;
-};
-
-/**
- * panel-tpo-td043mtea1 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @nreset_gpio: reset signal
- */
-struct panel_tpo_td043mtea1_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-
-	int nreset_gpio;
-};
-
-/**
- * panel-nec-nl8048hl11 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @res_gpio: reset signal
- * @qvga_gpio: selection for resolution(QVGA/WVGA)
- */
-struct panel_nec_nl8048hl11_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-
-	int res_gpio;
-	int qvga_gpio;
-};
-
-/**
- * panel-tpo-td028ttec1 platform data
- * @name: name for display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- */
-struct panel_tpo_td028ttec1_platform_data {
-	const char *name;
-	const char *source;
-
-	int data_lines;
-};
-
 #endif /* __OMAP_PANEL_DATA_H */
diff --git a/include/video/omapdss.h b/include/video/omapfb_dss.h
index 8e14ad7327c9..1d38901d599d 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapfb_dss.h
@@ -1,27 +1,20 @@
 /*
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Copyright (C) 2016 Texas Instruments, Inc.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
  */
 
-#ifndef __OMAP_OMAPDSS_H
-#define __OMAP_OMAPDSS_H
+#ifndef __OMAPFB_DSS_H
+#define __OMAPFB_DSS_H
 
 #include <linux/list.h>
 #include <linux/kobject.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/platform_data/omapdss.h>
 
 #include <video/videomode.h>
 
@@ -167,11 +160,6 @@ enum omap_dss_display_state {
 	OMAP_DSS_DISPLAY_ACTIVE,
 };
 
-struct omap_dss_audio {
-	struct snd_aes_iec958 *iec;
-	struct snd_cea_861_aud_if *cea;
-};
-
 enum omap_dss_rotation_type {
 	OMAP_DSS_ROT_DMA	= 1 << 0,
 	OMAP_DSS_ROT_VRFB	= 1 << 1,
@@ -195,25 +183,6 @@ enum omap_overlay_caps {
 	OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
 };
 
-enum omap_overlay_manager_caps {
-	OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
-};
-
-enum omap_dss_clk_source {
-	OMAP_DSS_CLK_SRC_FCK = 0,		/* OMAP2/3: DSS1_ALWON_FCLK
-						 * OMAP4: DSS_FCLK */
-	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,	/* OMAP3: DSI1_PLL_FCLK
-						 * OMAP4: PLL1_CLK1 */
-	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,	/* OMAP3: DSI2_PLL_FCLK
-						 * OMAP4: PLL1_CLK2 */
-	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC,	/* OMAP4: PLL2_CLK1 */
-	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI,	/* OMAP4: PLL2_CLK2 */
-};
-
-enum omap_hdmi_flags {
-	OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
-};
-
 enum omap_dss_output_id {
 	OMAP_DSS_OUTPUT_DPI	= 1 << 0,
 	OMAP_DSS_OUTPUT_DBI	= 1 << 1,
@@ -303,36 +272,6 @@ struct omap_dss_dsi_config {
 	enum omap_dss_dsi_trans_mode trans_mode;
 };
 
-enum omapdss_version {
-	OMAPDSS_VER_UNKNOWN = 0,
-	OMAPDSS_VER_OMAP24xx,
-	OMAPDSS_VER_OMAP34xx_ES1,	/* OMAP3430 ES1.0, 2.0 */
-	OMAPDSS_VER_OMAP34xx_ES3,	/* OMAP3430 ES3.0+ */
-	OMAPDSS_VER_OMAP3630,
-	OMAPDSS_VER_AM35xx,
-	OMAPDSS_VER_OMAP4430_ES1,	/* OMAP4430 ES1.0 */
-	OMAPDSS_VER_OMAP4430_ES2,	/* OMAP4430 ES2.0, 2.1, 2.2 */
-	OMAPDSS_VER_OMAP4,		/* All other OMAP4s */
-	OMAPDSS_VER_OMAP5,
-	OMAPDSS_VER_AM43xx,
-	OMAPDSS_VER_DRA7xx,
-};
-
-/* Board specific data */
-struct omap_dss_board_info {
-	int num_devices;
-	struct omap_dss_device **devices;
-	struct omap_dss_device *default_device;
-	const char *default_display_name;
-	int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
-	void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
-	int (*set_min_bus_tput)(struct device *dev, unsigned long r);
-	enum omapdss_version version;
-};
-
-/* Init with the board info */
-extern int omap_display_init(struct omap_dss_board_info *board_data);
-
 struct omap_video_timings {
 	/* Unit: pixels */
 	u16 x_res;
@@ -463,7 +402,6 @@ struct omap_overlay_manager {
 	/* static fields */
 	const char *name;
 	enum omap_channel id;
-	enum omap_overlay_manager_caps caps;
 	struct list_head overlays;
 	enum omap_display_type supported_displays;
 	enum omap_dss_output_id supported_outputs;
@@ -919,4 +857,4 @@ omapdss_of_get_first_endpoint(const struct device_node *parent);
 struct omap_dss_device *
 omapdss_of_find_source_for_first_ep(struct device_node *node);
 
-#endif
+#endif /* __OMAPFB_DSS_H */
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 04be7021f848..318858edb1cd 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -365,7 +365,6 @@ static struct file_system_type bpf_fs_type = {
 	.name		= "bpf",
 	.mount		= bpf_mount,
 	.kill_sb	= kill_litter_super,
-	.fs_flags	= FS_USERNS_MOUNT,
 };
 
 MODULE_ALIAS_FS("bpf");
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index c42742208e5e..89b49f6773f0 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -125,7 +125,7 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 
 	domain = data->domain;
 	if (WARN_ON(domain == NULL))
-		return;
+		return -EINVAL;
 
 	if (!irq_domain_is_ipi(domain)) {
 		pr_warn("Trying to destroy a non IPI domain!\n");
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 8c7392c4fdbd..e99df0ff1d42 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -425,6 +425,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
 {
 	debug_object_free(timer, &hrtimer_debug_descr);
 }
+EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
 
 #else
 static inline void debug_hrtimer_init(struct hrtimer *timer) { }
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 77d7d034bac3..b9cfdbfae9aa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1841,6 +1841,9 @@ config TEST_BITMAP
 
 	  If unsure, say N.
 
+config TEST_UUID
+	tristate "Test functions located in the uuid module at runtime"
+
 config TEST_RHASHTABLE
 	tristate "Perform selftest on resizable hash table"
 	default n
diff --git a/lib/Makefile b/lib/Makefile
index 499fb354d627..ff6a7a6c6395 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
 obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_UUID) += test_uuid.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
new file mode 100644
index 000000000000..547d3127a3cf
--- /dev/null
+++ b/lib/test_uuid.c
@@ -0,0 +1,133 @@
+/*
+ * Test cases for lib/uuid.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+
+struct test_uuid_data {
+	const char *uuid;
+	uuid_le le;
+	uuid_be be;
+};
+
+static const struct test_uuid_data test_uuid_test_data[] = {
+	{
+		.uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
+		.le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+		.be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+	},
+	{
+		.uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
+		.le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+		.be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+	},
+	{
+		.uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
+		.le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+		.be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+	},
+};
+
+static const char * const test_uuid_wrong_data[] = {
+	"c33f4995-3701-450e-9fbf206a2e98e576 ",	/* no hyphen(s) */
+	"64b4371c-77c1-48f9-8221-29f054XX023b",	/* invalid character(s) */
+	"0cb4ddff-a545-4401-9d06-688af53e",	/* not enough data */
+};
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
+				    const char *data, const char *actual)
+{
+	pr_err("%s test #%u %s %s data: '%s'\n",
+	       prefix,
+	       total_tests,
+	       wrong ? "passed on wrong" : "failed on",
+	       be ? "BE" : "LE",
+	       data);
+	if (actual && *actual)
+		pr_err("%s test #%u actual data: '%s'\n",
+		       prefix,
+		       total_tests,
+		       actual);
+	failed_tests++;
+}
+
+static void __init test_uuid_test(const struct test_uuid_data *data)
+{
+	uuid_le le;
+	uuid_be be;
+	char buf[48];
+
+	/* LE */
+	total_tests++;
+	if (uuid_le_to_bin(data->uuid, &le))
+		test_uuid_failed("conversion", false, false, data->uuid, NULL);
+
+	total_tests++;
+	if (uuid_le_cmp(data->le, le)) {
+		sprintf(buf, "%pUl", &le);
+		test_uuid_failed("cmp", false, false, data->uuid, buf);
+	}
+
+	/* BE */
+	total_tests++;
+	if (uuid_be_to_bin(data->uuid, &be))
+		test_uuid_failed("conversion", false, true, data->uuid, NULL);
+
+	total_tests++;
+	if (uuid_be_cmp(data->be, be)) {
+		sprintf(buf, "%pUb", &be);
+		test_uuid_failed("cmp", false, true, data->uuid, buf);
+	}
+}
+
+static void __init test_uuid_wrong(const char *data)
+{
+	uuid_le le;
+	uuid_be be;
+
+	/* LE */
+	total_tests++;
+	if (!uuid_le_to_bin(data, &le))
+		test_uuid_failed("negative", true, false, data, NULL);
+
+	/* BE */
+	total_tests++;
+	if (!uuid_be_to_bin(data, &be))
+		test_uuid_failed("negative", true, true, data, NULL);
+}
+
+static int __init test_uuid_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
+		test_uuid_test(&test_uuid_test_data[i]);
+
+	for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
+		test_uuid_wrong(test_uuid_wrong_data[i]);
+
+	if (failed_tests == 0)
+		pr_info("all %u tests passed\n", total_tests);
+	else
+		pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+	return failed_tests ? -EINVAL : 0;
+}
+module_init(test_uuid_init);
+
+static void __exit test_uuid_exit(void)
+{
+	/* do nothing */
+}
+module_exit(test_uuid_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/uuid.c b/lib/uuid.c
index e116ae5fa00f..37687af77ff8 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -106,8 +106,8 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
 		return -EINVAL;
 
 	for (i = 0; i < 16; i++) {
-		int hi = hex_to_bin(uuid[si[i]] + 0);
-		int lo = hex_to_bin(uuid[si[i]] + 1);
+		int hi = hex_to_bin(uuid[si[i] + 0]);
+		int lo = hex_to_bin(uuid[si[i] + 1]);
 
 		b[ei[i]] = (hi << 4) | lo;
 	}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 925b431f3f03..58c69c94402a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 	 * ordering is imposed by list_lru_node->lock taken by
 	 * memcg_drain_all_list_lrus().
 	 */
+	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
 	css_for_each_descendant_pre(css, &memcg->css) {
 		child = mem_cgroup_from_css(css);
 		BUG_ON(child->kmemcg_id != kmemcg_id);
@@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 		if (!memcg->use_hierarchy)
 			break;
 	}
+	rcu_read_unlock();
+
 	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
 
 	memcg_free_cache_id(kmemcg_id);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dfb1ab61fb23..acbc432d1a52 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -625,8 +625,6 @@ void try_oom_reaper(struct task_struct *tsk)
 	if (atomic_read(&mm->mm_users) > 1) {
 		rcu_read_lock();
 		for_each_process(p) {
-			bool exiting;
-
 			if (!process_shares_mm(p, mm))
 				continue;
 			if (fatal_signal_pending(p))
@@ -636,10 +634,7 @@ void try_oom_reaper(struct task_struct *tsk)
 			 * If the task is exiting make sure the whole thread group
 			 * is exiting and cannot acces mm anymore.
 			 */
-			spin_lock_irq(&p->sighand->siglock);
-			exiting = signal_group_exit(p->signal);
-			spin_unlock_irq(&p->sighand->siglock);
-			if (exiting)
+			if (signal_group_exit(p->signal))
 				continue;
 
 			/* Give up */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f8f3bfc435ee..6903b695ebae 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	set_page_private(page, 0);
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 			page = list_last_entry(list, struct page, lru);
 		else
 			page = list_first_entry(list, struct page, lru);
-	} while (page && check_new_pcp(page));
 
 		__dec_zone_state(zone, NR_ALLOC_BATCH);
 		list_del(&page->lru);
 		pcp->count--;
+
+		} while (check_new_pcp(page));
 	} else {
 		/*
 		 * We most definitely don't want callers attempting to
@@ -3023,6 +3030,7 @@ reset_fair:
 		apply_fair = false;
 		fair_skipped = false;
 		reset_alloc_batches(ac->preferred_zoneref->zone);
+		z = ac->preferred_zoneref;
 		goto zonelist_scan;
 	}
 
@@ -3596,6 +3604,17 @@ retry:
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+	/*
+	 * Reset the zonelist iterators if memory policies can be ignored.
+	 * These allocations are high priority and system rather than user
+	 * orientated.
+	 */
+	if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+	}
+
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, order,
 				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3623,6 @@ retry:
 
 	/* Allocate without watermarks if the context allows */
 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
-		/*
-		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
-		 * the allocation is high priority and these type of
-		 * allocations are system rather than user orientated
-		 */
-		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
 		page = get_page_from_freelist(gfp_mask, order,
 						ALLOC_NO_WATERMARKS, ac);
 		if (page)
@@ -3808,7 +3821,11 @@ retry_cpuset:
 	/* Dirty zone balancing only done in the fast path */
 	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
-	/* The preferred zone is used for statistics later */
+	/*
+	 * The preferred zone is used for statistics but crucially it is
+	 * also used as the starting point for the zonelist iterator. It
+	 * may get reset for allocations that ignore memory policies.
+	 */
 	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
 					ac.high_zoneidx, ac.nodemask);
 	if (!ac.preferred_zoneref) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 792b56da13d8..c6cda3e36212 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
 	}
 }
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+
 	struct stack_trace trace = {
 		.nr_entries = 0,
 		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 		.skip = 3,
 	};
 
+	if (unlikely(!page_ext))
+		return;
+
 	save_stack_trace(&trace);
 
 	page_ext->order = order;
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
 
 	page_ext->last_migrate_reason = reason;
 }
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 gfp_t __get_page_owner_gfp(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		/*
+		 * The caller just returns 0 if no valid gfp
+		 * So return 0 here too.
+		 */
+		return 0;
 
 	return page_ext->gfp_mask;
 }
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	struct page_ext *new_ext = lookup_page_ext(newpage);
 	int i;
 
+	if (unlikely(!old_ext || !new_ext))
+		return;
+
 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
 	new_ext->nr_entries = old_ext->nr_entries;
@@ -193,6 +210,11 @@ void __dump_page_owner(struct page *page)
 	gfp_t gfp_mask = page_ext->gfp_mask;
 	int mt = gfpflags_to_migratetype(gfp_mask);
 
+	if (unlikely(!page_ext)) {
+		pr_alert("There is not page extension available.\n");
+		return;
+	}
+
 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
 		return;
@@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		}
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/*
 		 * Some pages could be missed by concurrent allocation or free,
@@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/* Maybe overraping zone */
 		if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 1eae5fad2446..2e647c65916b 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
-	if (!page_ext)
+	if (unlikely(!page_ext))
 		return false;
 
 	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index cf7ad1a53be0..e11475cdeb7a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
  */
 void vm_unmap_ram(const void *mem, unsigned int count)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr = (unsigned long)mem;
 
 	BUG_ON(!addr);
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram);
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr;
 	void *mem;
 
@@ -1574,14 +1574,15 @@ void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot)
 {
 	struct vm_struct *area;
+	unsigned long size;		/* In bytes */
 
 	might_sleep();
 
 	if (count > totalram_pages)
 		return NULL;
 
-	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
-					__builtin_return_address(0));
+	size = (unsigned long)count << PAGE_SHIFT;
+	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
 	if (!area)
 		return NULL;
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 77e42ef388c2..cb2a67bb4158 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 			continue;
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 34917d55d311..8f9e89ca1d31 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		/* HEADLESS page stored */
 		bud = HEADLESS;
 	} else {
-		bud = (handle - zhdr->first_num) & BUDDY_MASK;
+		bud = handle_to_buddy(handle);
 
 		switch (bud) {
 		case FIRST:
@@ -572,15 +572,19 @@ next:
 		pool->pages_nr--;
 		spin_unlock(&pool->lock);
 		return 0;
-	} else if (zhdr->first_chunks != 0 &&
-		   zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) {
-		/* Full, add to buddied list */
-		list_add(&zhdr->buddy, &pool->buddied);
-	} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
-		z3fold_compact_page(zhdr);
-		/* add to unbuddied list */
-		freechunks = num_free_chunks(zhdr);
-		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+	} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
+		if (zhdr->first_chunks != 0 &&
+		    zhdr->last_chunks != 0 &&
+		    zhdr->middle_chunks != 0) {
+			/* Full, add to buddied list */
+			list_add(&zhdr->buddy, &pool->buddied);
+		} else {
+			z3fold_compact_page(zhdr);
+			/* add to unbuddied list */
+			freechunks = num_free_chunks(zhdr);
+			list_add(&zhdr->buddy,
+				 &pool->unbuddied[freechunks]);
+		}
 	}
 
 	/* add to beginning of LRU */
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a1e273af6fc8..82a116ba590e 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -290,6 +290,10 @@ static void vlan_sync_address(struct net_device *dev,
 	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
 		return;
 
+	/* vlan continues to inherit address of lower device */
+	if (vlan_dev_inherit_address(vlandev, dev))
+		goto out;
+
 	/* vlan address was different from the old address and is equal to
 	 * the new address */
 	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
@@ -302,6 +306,7 @@ static void vlan_sync_address(struct net_device *dev,
 	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
 		dev_uc_add(dev, vlandev->dev_addr);
 
+out:
 	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
 }
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9d010a09ab98..cc1557978066 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 					    u16 vlan_tci)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e7e62570bdb8..86ae75b77390 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -245,6 +245,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
 	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev)
+{
+	if (dev->addr_assign_type != NET_ADDR_STOLEN)
+		return false;
+
+	ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return true;
+}
+
 static int vlan_dev_open(struct net_device *dev)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -255,7 +266,8 @@ static int vlan_dev_open(struct net_device *dev)
 	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
 		return -ENETDOWN;
 
-	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+	    !vlan_dev_inherit_address(dev, real_dev)) {
 		err = dev_uc_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;
@@ -560,8 +572,10 @@ static int vlan_dev_init(struct net_device *dev)
 	/* ipv6 shared card related stuff */
 	dev->dev_id = real_dev->dev_id;
 
-	if (is_zero_ether_addr(dev->dev_addr))
-		eth_hw_addr_inherit(dev, real_dev);
+	if (is_zero_ether_addr(dev->dev_addr)) {
+		ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+		dev->addr_assign_type = NET_ADDR_STOLEN;
+	}
 	if (is_zero_ether_addr(dev->broadcast))
 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 4fd6af47383a..adb6e3d21b1e 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -124,7 +124,7 @@ as_indicate_complete:
 		break;
 	case as_addparty:
 	case as_dropparty:
-		sk->sk_err_soft = msg->reply;
+		sk->sk_err_soft = -msg->reply;
 					/* < 0 failure, otherwise ep_ref */
 		clear_bit(ATM_VF_WAITING, &vcc->flags);
 		break;
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 3fa0a9ee98d1..878563a8354d 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -546,7 +546,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
 		schedule();
 	}
 	finish_wait(sk_sleep(sk), &wait);
-	error = xchg(&sk->sk_err_soft, 0);
+	error = -xchg(&sk->sk_err_soft, 0);
 out:
 	release_sock(sk);
 	return error;
@@ -573,7 +573,7 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
 		error = -EUNATCH;
 		goto out;
 	}
-	error = xchg(&sk->sk_err_soft, 0);
+	error = -xchg(&sk->sk_err_soft, 0);
 out:
 	release_sock(sk);
 	return error;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0160d7d09a1e..89469592076c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1276,9 +1276,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
 				     const struct ceph_osd_request_target *t,
 				     struct ceph_pg_pool_info *pi)
 {
-	bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-	bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-		       ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
 		       __pool_full(pi);
 
 	WARN_ON(pi->id != t->base_oloc.pool);
@@ -1303,8 +1303,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 	bool force_resend = false;
 	bool need_check_tiering = false;
 	bool need_resend = false;
-	bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
-					     CEPH_OSDMAP_SORTBITWISE);
+	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
 	enum calc_target_result ct_res;
 	int ret;
 
@@ -1540,9 +1539,9 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
 	 */
 	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
 
-	dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
-	     req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
-	     req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
+	dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
+	     req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
+	     msg->front.iov_len, data_len);
 }
 
 /*
@@ -1590,9 +1589,9 @@ static void maybe_request_map(struct ceph_osd_client *osdc)
 	verify_osdc_locked(osdc);
 	WARN_ON(!osdc->osdmap->epoch);
 
-	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
-	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
-	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
+	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
 		dout("%s osdc %p continuous\n", __func__, osdc);
 		continuous = true;
 	} else {
@@ -1629,19 +1628,19 @@ again:
 	}
 
 	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
-	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
 		dout("req %p pausewr\n", req);
 		req->r_t.paused = true;
 		maybe_request_map(osdc);
 	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
-		   ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
 		dout("req %p pauserd\n", req);
 		req->r_t.paused = true;
 		maybe_request_map(osdc);
 	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
 		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
 				     CEPH_OSD_FLAG_FULL_FORCE)) &&
-		   (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
 		    pool_full(osdc, req->r_t.base_oloc.pool))) {
 		dout("req %p full/pool_full\n", req);
 		pr_warn_ratelimited("FULL or reached pool quota\n");
@@ -2280,7 +2279,7 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq)
 	struct ceph_osd_request *req = lreq->ping_req;
 	struct ceph_osd_req_op *op = &req->r_ops[0];
 
-	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
 		dout("%s PAUSERD\n", __func__);
 		return;
 	}
@@ -2893,6 +2892,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
 			dout("req %p tid %llu cb\n", req, req->r_tid);
 			__complete_request(req);
 		}
+		if (m.flags & CEPH_OSD_FLAG_ONDISK)
+			complete_all(&req->r_safe_completion);
+		ceph_osdc_put_request(req);
 	} else {
 		if (req->r_unsafe_callback) {
 			dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
@@ -2901,10 +2903,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
 			WARN_ON(1);
 		}
 	}
-	if (m.flags & CEPH_OSD_FLAG_ONDISK)
-		complete_all(&req->r_safe_completion);
 
-	ceph_osdc_put_request(req);
 	return;
 
 fail_request:
@@ -3050,7 +3049,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
 	bool skipped_map = false;
 	bool was_full;
 
-	was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
 	set_pool_was_full(osdc);
 
 	if (incremental)
@@ -3088,7 +3087,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
 		osdc->osdmap = newmap;
 	}
 
-	was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
 	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
 		      need_resend, need_resend_linger);
 
@@ -3174,9 +3173,9 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 	if (ceph_check_fsid(osdc->client, &fsid) < 0)
 		goto bad;
 
-	was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-	was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-		      ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
 		      have_pool_full(osdc);
 
 	/* incremental maps */
@@ -3238,9 +3237,9 @@ done:
 	 * we find out when we are no longer full and stop returning
 	 * ENOSPC.
 	 */
-	pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-	pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-		  ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
 		  have_pool_full(osdc);
 	if (was_pauserd || was_pausewr || pauserd || pausewr)
 		maybe_request_map(osdc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index cde52e94732f..03062bb763b3 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1778,8 +1778,8 @@ int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
 	raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
 				       oid->name_len);
 
-	dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len,
-	     oid->name, raw_pgid->pool, raw_pgid->seed);
+	dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
+	     raw_pgid->pool, raw_pgid->seed);
 	return 0;
 }
 EXPORT_SYMBOL(ceph_object_locator_to_pg);
diff --git a/net/core/hwbm.c b/net/core/hwbm.c
index 941c28486896..2cab489ae62e 100644
--- a/net/core/hwbm.c
+++ b/net/core/hwbm.c
@@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
 	spin_lock_irqsave(&bm_pool->lock, flags);
 	if (bm_pool->buf_num == bm_pool->size) {
 		pr_warn("pool already filled\n");
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return bm_pool->buf_num;
 	}
 
 	if (buf_num + bm_pool->buf_num > bm_pool->size) {
 		pr_warn("cannot allocate %d buffers for pool\n",
 			buf_num);
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return 0;
 	}
 
 	if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
 		pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
 			buf_num, bm_pool->buf_num);
+		spin_unlock_irqrestore(&bm_pool->lock, flags);
 		return 0;
 	}
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 8604ae245960..8b02df0d354d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2245,10 +2245,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	hrtimer_set_expires(&t.timer, spin_until);
 
 	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
-	if (remaining <= 0) {
-		pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
-		return;
-	}
+	if (remaining <= 0)
+		goto out;
 
 	start_time = ktime_get();
 	if (remaining < 100000) {
@@ -2273,7 +2271,9 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	}
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+out:
 	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
+	destroy_hrtimer_on_stack(&t.timer);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index ca207dbf673b..116187b5c267 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -1289,8 +1289,8 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
 			     nl802154_dev_addr_policy))
 		return -EINVAL;
 
-	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
-	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
+	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
 	    !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
 	      attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
 		return -EINVAL;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 377424ea17a4..d39e9e47a26e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1681,6 +1681,14 @@ static __net_init int inet_init_net(struct net *net)
 	 */
 	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
 	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+
+	/* Default values for sysctl-controlled parameters.
+	 * We set them here, in case sysctl is not compiled.
+	 */
+	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
+	net->ipv4.sysctl_ip_dynaddr = 0;
+	net->ipv4.sysctl_ip_early_demux = 1;
+
 	return 0;
 }
 
1686 1694
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index bb0419582b8d..1cb67de106fe 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -999,10 +999,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 	if (!net->ipv4.sysctl_local_reserved_ports)
 		goto err_ports;
 
-	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
-	net->ipv4.sysctl_ip_dynaddr = 0;
-	net->ipv4.sysctl_ip_early_demux = 1;
-
 	return 0;
 
 err_ports:
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 3f8411328de5..2343e4f2e0bf 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -232,6 +232,15 @@ config IPV6_GRE
 
 	  Saying M here will produce a module called ip6_gre. If unsure, say N.
 
+config IPV6_FOU
+	tristate
+	default NET_FOU && IPV6
+
+config IPV6_FOU_TUNNEL
+	tristate
+	default NET_FOU_IP_TUNNELS && IPV6_FOU
+	select IPV6_TUNNEL
+
 config IPV6_MULTIPLE_TABLES
 	bool "IPv6: Multiple Routing Tables"
 	select FIB_RULES
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 7ec3129c9ace..6d8ea099213e 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
-obj-$(CONFIG_NET_FOU) += fou6.o
+obj-$(CONFIG_IPV6_FOU) += fou6.o
 
 obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
 obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index c972d0b52579..9ea249b9451e 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -69,7 +69,7 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 }
 EXPORT_SYMBOL(gue6_build_header);
 
-#ifdef CONFIG_NET_FOU_IP_TUNNELS
+#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
 
 static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
 	.encap_hlen = fou_encap_hlen,
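
The Kconfig, Makefile and fou6.c hunks belong together: IPV6_FOU and IPV6_FOU_TUNNEL become proper tristate symbols, and the guard switches from #ifdef to IS_ENABLED(). The distinction matters for modular builds: for a tristate symbol, =y defines CONFIG_FOO while =m defines CONFIG_FOO_MODULE, so a bare #ifdef CONFIG_FOO silently compiles the code out when fou6 is built as a module. Roughly:

    #include <linux/kconfig.h>

    #if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)   /* true for both =y and =m */
    /* tunnel encap ops get registered */
    #endif

    #ifdef CONFIG_IPV6_FOU_TUNNEL            /* true only for =y; misses =m */
    /* would be skipped in a module build */
    #endif
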
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index af503f518278..f4ac2842d4d9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -712,6 +712,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 	fl6->daddr = p->raddr;
 	fl6->flowi6_oif = p->link;
 	fl6->flowlabel = 0;
+	fl6->flowi6_proto = IPPROTO_GRE;
 
 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
@@ -1027,6 +1028,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 
 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
 	dev->mtu = ETH_DATA_LEN - t_hlen;
+	if (dev->type == ARPHRD_ETHER)
+		dev->mtu -= ETH_HLEN;
 	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index c6f5df1bed12..6c54e03fe9c1 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -128,6 +128,7 @@ static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
  */
 static int l2tp_ip6_recv(struct sk_buff *skb)
 {
+	struct net *net = dev_net(skb->dev);
 	struct sock *sk;
 	u32 session_id;
 	u32 tunnel_id;
@@ -154,7 +155,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 	}
 
 	/* Ok, this is a data packet. Lookup the session. */
-	session = l2tp_session_find(&init_net, NULL, session_id);
+	session = l2tp_session_find(net, NULL, session_id);
 	if (session == NULL)
 		goto discard;
 
@@ -188,14 +189,14 @@ pass_up:
 		goto discard;
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-	tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+	tunnel = l2tp_tunnel_find(net, tunnel_id);
 	if (tunnel != NULL)
 		sk = tunnel->sock;
 	else {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		read_lock_bh(&l2tp_ip6_lock);
-		sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
+		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
 					    0, tunnel_id);
 		read_unlock_bh(&l2tp_ip6_lock);
 	}
@@ -263,6 +264,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
+	struct net *net = sock_net(sk);
 	__be32 v4addr = 0;
 	int addr_type;
 	int err;
@@ -286,7 +288,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
 	err = -EADDRINUSE;
 	read_lock_bh(&l2tp_ip6_lock);
-	if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
+	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
 				   sk->sk_bound_dev_if, addr->l2tp_conn_id))
 		goto out_in_use;
 	read_unlock_bh(&l2tp_ip6_lock);
@@ -456,7 +458,7 @@ static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 
 drop:
-	IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
 	kfree_skb(skb);
 	return -1;
 }
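
Every l2tp_ip6 change above replaces a hard-coded &init_net with the namespace the packet or socket actually belongs to, which is what makes L2TP over IPv6 usable inside network namespaces. The helpers are the standard ones; the pattern, as a fragment rather than compilable code, is:

    /* RX path: the namespace comes from the ingress device. */
    struct net *net = dev_net(skb->dev);

    /* Socket paths (bind, stats): the namespace the socket lives in. */
    struct net *net2 = sock_net(sk);

    /* All subsequent lookups are then scoped to that namespace, e.g.
     *   l2tp_session_find(net, NULL, session_id);
     */
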
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index 5dba899131b3..182470847fcf 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -444,10 +444,9 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
 		break;
 
 	case LAPB_FRMR:
-		lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
-			 lapb->dev, frame->pf,
-			 skb->data[0], skb->data[1], skb->data[2],
-			 skb->data[3], skb->data[4]);
+		lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n",
+			 lapb->dev, frame->pf,
+			 skb->data);
 		lapb_establish_data_link(lapb);
 		lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
 		lapb_requeue_frames(lapb);
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index ba4d015bd1a6..482c94d9d958 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -148,9 +148,7 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
 		}
 	}
 
-	lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
-		 lapb->dev, lapb->state,
-		 skb->data[0], skb->data[1], skb->data[2]);
+	lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data);
 
 	if (!lapb_data_transmit(lapb, skb))
 		kfree_skb(skb);
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 9d0a426eccbb..3c1914df641f 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -113,9 +113,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
 {
 	frame->type = LAPB_ILLEGAL;
 
-	lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
-		 lapb->dev, lapb->state,
-		 skb->data[0], skb->data[1], skb->data[2]);
+	lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data);
 
 	/* We always need to look at 2 bytes, sometimes we need
 	 * to look at 3 and those cases are handled below.
@@ -284,10 +282,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
 		dptr++;
 		*dptr++ = lapb->frmr_type;
 
-		lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
-			 lapb->dev, lapb->state,
-			 skb->data[1], skb->data[2], skb->data[3],
-			 skb->data[4], skb->data[5]);
+		lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n",
+			 lapb->dev, lapb->state,
+			 &skb->data[1]);
 	} else {
 		dptr = skb_put(skb, 4);
 		*dptr++ = LAPB_FRMR;
@@ -299,9 +296,8 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
 		dptr++;
 		*dptr++ = lapb->frmr_type;
 
-		lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
-			 lapb->dev, lapb->state, skb->data[1],
-			 skb->data[2], skb->data[3]);
+		lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n",
+			 lapb->dev, lapb->state, &skb->data[1]);
 	}
 
 	lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
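
All four lapb hunks are the same conversion: the kernel printk family supports a %*ph extension that dumps a small buffer (up to 64 bytes) as space-separated hex, so hand-written chains of %02X specifiers and explicit skb->data[n] arguments collapse into a single pointer argument. Note this is a kernel-only format; userspace printf() does not understand it. For instance:

    u8 hdr[5] = { 0x01, 0x7f, 0x15, 0x00, 0x42 };

    /* Prints "hdr: 01 7f 15 00 42" */
    pr_debug("hdr: %5ph\n", hdr);
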
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 879185fe183f..9a3eb7a0ebf4 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -137,11 +137,23 @@ static bool is_flow_key_valid(const struct sw_flow_key *key)
 	return !!key->eth.type;
 }
 
+static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
+			     __be16 ethertype)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		__be16 diff[] = { ~(hdr->h_proto), ethertype };
+
+		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+					  ~skb->csum);
+	}
+
+	hdr->h_proto = ethertype;
+}
+
 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 		     const struct ovs_action_push_mpls *mpls)
 {
 	__be32 *new_mpls_lse;
-	struct ethhdr *hdr;
 
 	/* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
 	if (skb->encapsulation)
@@ -160,9 +172,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 
 	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
 
-	hdr = eth_hdr(skb);
-	hdr->h_proto = mpls->mpls_ethertype;
-
+	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
 	if (!skb->inner_protocol)
 		skb_set_inner_protocol(skb, skb->protocol);
 	skb->protocol = mpls->mpls_ethertype;
@@ -193,7 +203,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 	 * field correctly in the presence of VLAN tags.
 	 */
 	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
-	hdr->h_proto = ethertype;
+	update_ethertype(skb, hdr, ethertype);
 	if (eth_p_mpls(skb->protocol))
 		skb->protocol = ethertype;
 
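
The new update_ethertype() helper exists because, on CHECKSUM_COMPLETE skbs, skb->csum covers the bytes being rewritten: changing h_proto without adjusting the checksum leaves skb->csum stale. The two-element diff[] array folds the old value out and the new one in using ones'-complement arithmetic (the RFC 1624 incremental-update idea). A self-contained userspace demonstration of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t csum_add(uint32_t sum, uint16_t v)
    {
            sum += v;
            return (sum & 0xffff) + (sum >> 16);  /* end-around carry */
    }

    static uint16_t csum16(const uint16_t *p, int n)
    {
            uint32_t sum = 0;
            while (n--)
                    sum = csum_add(sum, *p++);
            return sum;
    }

    int main(void)
    {
            uint16_t buf[4] = { 0x1111, 0x8847 /* old ethertype */, 0x3333, 0x4444 };
            uint16_t full = csum16(buf, 4);
            uint16_t old = buf[1], new_proto = 0x0800;

            /* Incremental update: fold out the old word, fold in the new. */
            uint16_t updated = csum_add(csum_add(full, (uint16_t)~old), new_proto);

            buf[1] = new_proto;
            printf("recomputed %04x incremental %04x\n", csum16(buf, 4), updated);
            return 0;
    }

Both values agree, which is exactly why the kernel can patch skb->csum in place instead of re-summing the whole packet.
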
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 330f14e302e8..b884dae692a1 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -239,6 +239,8 @@ override:
 	police->tcfp_t_c = ktime_get_ns();
 	police->tcf_index = parm->index ? parm->index :
 		tcf_hash_new_index(tn);
+	police->tcf_tm.install = jiffies;
+	police->tcf_tm.lastuse = jiffies;
 	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
 	spin_lock_bh(&hinfo->lock);
 	hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
@@ -268,6 +270,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
 	spin_lock(&police->tcf_lock);
 
 	bstats_update(&police->tcf_bstats, skb);
+	tcf_lastuse_update(&police->tcf_tm);
 
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
@@ -327,6 +330,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 		.refcnt = police->tcf_refcnt - ref,
 		.bindcnt = police->tcf_bindcnt - bind,
 	};
+	struct tcf_t t;
 
 	if (police->rate_present)
 		psched_ratecfg_getrate(&opt.rate, &police->rate);
@@ -340,6 +344,13 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	if (police->tcfp_ewma_rate &&
 	    nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
 		goto nla_put_failure;
+
+	t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
+	t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
+	t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
+	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
+		goto nla_put_failure;
+
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 64f71a2155f3..ddf047df5361 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -607,6 +607,10 @@ void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool thr
 	if (throttle)
 		qdisc_throttled(wd->qdisc);
 
+	if (wd->last_expires == expires)
+		return;
+
+	wd->last_expires = expires;
 	hrtimer_start(&wd->timer,
 		      ns_to_ktime(expires),
 		      HRTIMER_MODE_ABS_PINNED);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f6bf5818ed4d..d4b4218af6b1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -928,17 +928,10 @@ ok:
 		}
 	}
 	qdisc_qstats_overlimit(sch);
-	if (likely(next_event > q->now)) {
-		if (!test_bit(__QDISC_STATE_DEACTIVATED,
-			      &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
-			ktime_t time = ns_to_ktime(next_event);
-			qdisc_throttled(q->watchdog.qdisc);
-			hrtimer_start(&q->watchdog.timer, time,
-				      HRTIMER_MODE_ABS_PINNED);
-		}
-	} else {
+	if (likely(next_event > q->now))
+		qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
+	else
 		schedule_work(&q->work);
-	}
 fin:
 	return skb;
 }
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 8e3e769dc9ea..1ce724b87618 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -356,6 +356,9 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
 	if (cb->args[4] < cb->args[1])
 		goto next;
 
+	if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
+		goto next;
+
 	if (r->sdiag_family != AF_UNSPEC &&
 	    sk->sk_family != r->sdiag_family)
 		goto next;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 777d0324594a..67154b848aa9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4220,6 +4220,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
 	info->sctpi_s_disable_fragments = sp->disable_fragments;
 	info->sctpi_s_v4mapped = sp->v4mapped;
 	info->sctpi_s_frag_interleave = sp->frag_interleave;
+	info->sctpi_s_type = sp->type;
 
 	return 0;
 }
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 4dfc5c14f8c3..f795b1dd0ccd 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -346,9 +346,15 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
 				       struct nlattr **attrs)
 {
 	struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
+	int err;
+
+	if (!attrs[TIPC_NLA_BEARER])
+		return -EINVAL;
 
-	nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER],
-			 NULL);
+	err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX,
+			       attrs[TIPC_NLA_BEARER], NULL);
+	if (err)
+		return err;
 
 	return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
 			    nla_data(bearer[TIPC_NLA_BEARER_NAME]),
@@ -460,14 +466,31 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
 	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
 	struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
 	struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+	int err;
 
-	nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+	if (!attrs[TIPC_NLA_LINK])
+		return -EINVAL;
 
-	nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP],
-			 NULL);
+	err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+			       NULL);
+	if (err)
+		return err;
+
+	if (!link[TIPC_NLA_LINK_PROP])
+		return -EINVAL;
 
-	nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS],
-			 NULL);
+	err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
+			       link[TIPC_NLA_LINK_PROP], NULL);
+	if (err)
+		return err;
+
+	if (!link[TIPC_NLA_LINK_STATS])
+		return -EINVAL;
+
+	err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
+			       link[TIPC_NLA_LINK_STATS], NULL);
+	if (err)
+		return err;
 
 	name = (char *)TLV_DATA(msg->req);
 	if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
@@ -569,8 +592,15 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 {
 	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
 	struct tipc_link_info link_info;
+	int err;
 
-	nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+	if (!attrs[TIPC_NLA_LINK])
+		return -EINVAL;
+
+	err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+			       NULL);
+	if (err)
+		return err;
 
 	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
 	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
@@ -758,12 +788,23 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
 	u32 node, depth, type, lowbound, upbound;
 	static const char * const scope_str[] = {"", " zone", " cluster",
 						 " node"};
+	int err;
 
-	nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
-			 attrs[TIPC_NLA_NAME_TABLE], NULL);
+	if (!attrs[TIPC_NLA_NAME_TABLE])
+		return -EINVAL;
 
-	nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL],
-			 NULL);
+	err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
+			       attrs[TIPC_NLA_NAME_TABLE], NULL);
+	if (err)
+		return err;
+
+	if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
+		return -EINVAL;
+
+	err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
+			       nt[TIPC_NLA_NAME_TABLE_PUBL], NULL);
+	if (err)
+		return err;
 
 	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
 
@@ -815,8 +856,15 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
 {
 	u32 type, lower, upper;
 	struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
+	int err;
 
-	nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL);
+	if (!attrs[TIPC_NLA_PUBL])
+		return -EINVAL;
+
+	err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL],
+			       NULL);
+	if (err)
+		return err;
 
 	type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
 	lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
@@ -876,7 +924,13 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
 	u32 sock_ref;
 	struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
 
-	nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL);
+	if (!attrs[TIPC_NLA_SOCK])
+		return -EINVAL;
+
+	err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK],
+			       NULL);
+	if (err)
+		return err;
 
 	sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
 	tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
@@ -917,9 +971,15 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
 				      struct nlattr **attrs)
 {
 	struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
+	int err;
+
+	if (!attrs[TIPC_NLA_MEDIA])
+		return -EINVAL;
 
-	nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
-			 NULL);
+	err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
+			       NULL);
+	if (err)
+		return err;
 
 	return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
 			    nla_data(media[TIPC_NLA_MEDIA_NAME]),
@@ -931,8 +991,15 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
 {
 	struct tipc_node_info node_info;
 	struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
+	int err;
 
-	nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL);
+	if (!attrs[TIPC_NLA_NODE])
+		return -EINVAL;
+
+	err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE],
+			       NULL);
+	if (err)
+		return err;
 
 	node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
 	node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
@@ -971,8 +1038,16 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
 {
 	__be32 id;
 	struct nlattr *net[TIPC_NLA_NET_MAX + 1];
+	int err;
+
+	if (!attrs[TIPC_NLA_NET])
+		return -EINVAL;
+
+	err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET],
+			       NULL);
+	if (err)
+		return err;
 
-	nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL);
 	id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
 
 	return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
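
Every tipc_nl_compat hunk above applies the same two-step hardening: the compat layer used to call nla_parse_nested() on container attributes that a malformed dump reply could simply omit, and it ignored the parse result entirely. The repeated pattern, sketched here with a hypothetical TIPC_NLA_FOO container:

    struct nlattr *nest[TIPC_NLA_FOO_MAX + 1];
    int err;

    if (!attrs[TIPC_NLA_FOO])          /* the container must exist ... */
            return -EINVAL;

    err = nla_parse_nested(nest, TIPC_NLA_FOO_MAX,
                           attrs[TIPC_NLA_FOO], NULL);
    if (err)                           /* ... and must parse cleanly */
            return err;
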
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6750595bd7b8..4904ced676d4 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2454,6 +2454,7 @@ sub process {
 
 # Check for git id commit length and improperly formed commit descriptions
 	if ($in_commit_log && !$commit_log_possible_stack_dump &&
+	    $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
 	    ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
 	     ($line =~ /\b[0-9a-f]{12,40}\b/i &&
 	      $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
diff --git a/security/keys/compat.c b/security/keys/compat.c
index c8783b3b628c..36c80bf5b89c 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -134,7 +134,7 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
 
 	case KEYCTL_DH_COMPUTE:
 		return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3),
-					 arg4);
+					 arg4, compat_ptr(arg5));
 
 	default:
 		return -EOPNOTSUPP;
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 880505a4b9f1..531ed2ec132f 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -78,7 +78,8 @@ error:
 }
 
 long keyctl_dh_compute(struct keyctl_dh_params __user *params,
-		       char __user *buffer, size_t buflen)
+		       char __user *buffer, size_t buflen,
+		       void __user *reserved)
 {
 	long ret;
 	MPI base, private, prime, result;
@@ -97,6 +98,11 @@ long keyctl_dh_compute(struct keyctl_dh_params __user *params,
 		goto out;
 	}
 
+	if (reserved) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	keylen = mpi_from_key(pcopy.prime, buflen, &prime);
 	if (keylen < 0 || !prime) {
 		/* buflen == 0 may be used to query the required buffer size,
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 8ec7a528365d..a705a7d92ad7 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -260,10 +260,11 @@ static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
 
 #ifdef CONFIG_KEY_DH_OPERATIONS
 extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
-			      size_t);
+			      size_t, void __user *);
 #else
 static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
-				     char __user *buffer, size_t buflen)
+				     char __user *buffer, size_t buflen,
+				     void __user *reserved)
 {
 	return -EOPNOTSUPP;
 }
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 3b135a0af344..d580ad06b792 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1688,8 +1688,8 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
 
 	case KEYCTL_DH_COMPUTE:
 		return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
-					 (char __user *) arg3,
-					 (size_t) arg4);
+					 (char __user *) arg3, (size_t) arg4,
+					 (void __user *) arg5);
 
 	default:
 		return -EOPNOTSUPP;
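
The four keys hunks thread a fifth argument through keyctl_dh_compute() solely to reserve it: both syscall multiplexers now pass arg5 down, and the implementation rejects anything non-NULL, so a later extension can give the slot meaning without breaking existing binaries. From userspace the call would look roughly like this (a sketch using the keyutils keyctl() wrapper; params, buffer and buflen are hypothetical locals):

    #include <keyutils.h>

    /* buflen == 0 may still be used to query the required buffer size;
     * the new fifth argument must be NULL for now.
     */
    long ret = keyctl(KEYCTL_DH_COMPUTE, &params, buffer, buflen, NULL);
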
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index 64425d352962..888133f9e65d 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -28,7 +28,6 @@
 #include <sound/asoundef.h>
 #include <sound/omap-pcm.h>
 #include <sound/omap-hdmi-audio.h>
-#include <video/omapdss.h>
 
 #define DRV_NAME "omap-hdmi-audio"
 
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f12b3b277b..3a3a699b7489 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
 			continue;
 
-		if (cpu_if->vgic_elrsr & (1UL << i)) {
+		if (cpu_if->vgic_elrsr & (1UL << i))
 			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
-			continue;
-		}
+		else
+			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 
-		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 		writel_relaxed(0, base + GICH_LR0 + (i * 4));
 	}
 }
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 059595ec3da0..9f6fab74dce7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	 * other thread sync back the IRQ.
 	 */
 	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-	       irq->vcpu->cpu != -1) { /* VCPU thread is running */
-		BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
+	       irq->vcpu->cpu != -1) /* VCPU thread is running */
 		cond_resched_lock(&irq->irq_lock);
-	}
 
 	irq->active = new_active_state;
 	if (new_active_state)
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 8ad42c217770..e31405ee5515 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	/* Clear soft pending state when level IRQs have been acked */
-	if (irq->config == VGIC_CONFIG_LEVEL &&
-	    !(val & GICH_LR_PENDING_BIT)) {
-		irq->soft_pending = false;
-		irq->pending = irq->line_level;
+	/*
+	 * Clear soft pending state when level irqs have been acked.
+	 * Always regenerate the pending state.
+	 */
+	if (irq->config == VGIC_CONFIG_LEVEL) {
+		if (!(val & GICH_LR_PENDING_BIT))
+			irq->soft_pending = false;
+
+		irq->pending = irq->line_level || irq->soft_pending;
 	}
 
 	spin_unlock(&irq->irq_lock);
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 336a46115937..346b4ad12b49 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	/* Clear soft pending state when level irqs have been acked */
-	if (irq->config == VGIC_CONFIG_LEVEL &&
-	    !(val & ICH_LR_PENDING_BIT)) {
-		irq->soft_pending = false;
-		irq->pending = irq->line_level;
+	/*
+	 * Clear soft pending state when level irqs have been acked.
+	 * Always regenerate the pending state.
+	 */
+	if (irq->config == VGIC_CONFIG_LEVEL) {
+		if (!(val & ICH_LR_PENDING_BIT))
+			irq->soft_pending = false;
+
+		irq->pending = irq->line_level || irq->soft_pending;
 	}
 
 	spin_unlock(&irq->irq_lock);
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index fe84e1a95dd5..8db197bb6c7a 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
 
 	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
 					lockdep_is_held(&kvm->irq_lock));
-	if (gsi < irq_rt->nr_rt_entries) {
+	if (irq_rt && gsi < irq_rt->nr_rt_entries) {
 		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
 			entries[n] = *e;
 			++n;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 37af23052470..02e98f3131bd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2935,7 +2935,7 @@ static long kvm_vm_ioctl(struct file *filp,
 	case KVM_SET_GSI_ROUTING: {
 		struct kvm_irq_routing routing;
 		struct kvm_irq_routing __user *urouting;
-		struct kvm_irq_routing_entry *entries;
+		struct kvm_irq_routing_entry *entries = NULL;
 
 		r = -EFAULT;
 		if (copy_from_user(&routing, argp, sizeof(routing)))
@@ -2945,15 +2945,17 @@ static long kvm_vm_ioctl(struct file *filp,
 			goto out;
 		if (routing.flags)
 			goto out;
-		r = -ENOMEM;
-		entries = vmalloc(routing.nr * sizeof(*entries));
-		if (!entries)
-			goto out;
-		r = -EFAULT;
-		urouting = argp;
-		if (copy_from_user(entries, urouting->entries,
-				   routing.nr * sizeof(*entries)))
-			goto out_free_irq_routing;
+		if (routing.nr) {
+			r = -ENOMEM;
+			entries = vmalloc(routing.nr * sizeof(*entries));
+			if (!entries)
+				goto out;
+			r = -EFAULT;
+			urouting = argp;
+			if (copy_from_user(entries, urouting->entries,
+					   routing.nr * sizeof(*entries)))
+				goto out_free_irq_routing;
+		}
 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
 					routing.flags);
 out_free_irq_routing:
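
The kvm_main.c change handles KVM_SET_GSI_ROUTING with routing.nr == 0, i.e. clearing all routes: vmalloc(0) returns NULL, so the unconditional allocation turned a valid empty request into -ENOMEM. Initializing entries to NULL and skipping both the allocation and the copy when nr is zero lets kvm_set_irq_routing() receive an empty table. A userspace analogue of the pitfall:

    #include <stdlib.h>

    /* Treating a zero-length allocation's NULL return as out-of-memory
     * rejects a perfectly valid "nothing to copy" request.
     */
    int set_table(const int *src, size_t n)
    {
            int *entries = NULL;

            if (n) {
                    entries = malloc(n * sizeof(*entries));
                    if (!entries)
                            return -1;      /* genuine allocation failure */
                    /* ... copy src into entries ... */
            }
            /* install entries (NULL means "empty table"), then release */
            free(entries);
            return 0;
    }
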