author	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-07 20:36:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-07 20:36:12 -0400
commit	a7d7a143d0b4cb1914705884ca5c25e322dba693 (patch)
tree	0ee5e9e43f0863b38a29e8abc293e80eab177d74
parent	43c40df2c7fedce640a6c39fcdf58764f6bbac5c (diff)
parent	7963e9db1b1f842fdc53309baa8714d38e9f5681 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull DRM updates from Dave Airlie:
 "Like all good pull reqs this ends with a revert, so it must mean we
  tested it,

    [ Ed. That's _one_ way of looking at it ]

  This pull is missing nouveau.  Ben has been stuck trying to track
  down a very longstanding bug that revealed itself due to some other
  changes.  I've asked him to send you a direct pull request for
  nouveau once he cleans things up.  I'm away until Monday so don't
  want to delay things; you can make a decision on that when he sends
  it.  I have my phone so I can ack things, just not really merge much.

  It has one trivial conflict with your tree in armada_drv.c, and the
  pull request also contains some component changes that are already in
  your tree; the base tree from Russell went via Greg's tree already,
  but some stuff still shows up in here that doesn't when I merge my
  tree into yours.

  Otherwise all pretty standard graphics fare, one new driver and
  changes all over the place.

  New drivers:
   - sti kms driver for STMicroelectronics chipsets stih416 and stih407

  core:
   - lots of cleanups to the drm core
   - DP MST helper code merged
   - universal cursor planes
   - render nodes enabled by default

  panel:
   - better panel interfaces
   - new panel support
   - non-continuous clock advertising ability

  ttm:
   - shrinker fixes

  i915:
   - hopefully ditched UMS support
   - runtime pm fixes
   - psr tracking and locking - now enabled by default
   - userptr fixes
   - backlight brightness fixes
   - MST support merged
   - runtime PM for dpms
   - primary planes locking fixes
   - gen8 hw semaphore support
   - fbc fixes
   - runtime PM on SOix sleep state hw
   - mmio base page flipping
   - lots of vlv/chv fixes
   - universal cursor planes

  radeon:
   - Hawaii fixes
   - display scaler support for non-fixed mode displays
   - new firmware format support
   - dpm on more asics by default
   - GPUVM improvements
   - uncached and wc GTT buffers
   - BOs > visible VRAM

  exynos:
   - i80 interface support
   - module auto-loading
   - ipp driver consolidated

  armada:
   - irq handling in crtc layer only
   - crtc renumbering
   - add component support
   - DT interaction changes

  tegra:
   - load as module fixes
   - eDP bpp and sync polarity fixed
   - DSI non-continuous clock mode support
   - better support for importing buffers from nouveau

  msm:
   - mdp5/apq8084 v1.3 hw enablement
   - devicetree clk changes
   - ifc6410 board working

  tda998x:
   - component support
   - DT documentation update

  vmwgfx:
   - fix compat shader namespace"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (551 commits)
  Revert "drm: drop redundant drm_file->is_master"
  drm/panel: simple: Use devm_gpiod_get_optional()
  drm/dsi: Replace upcasting macro by function
  drm/panel: ld9040: Replace upcasting macro by function
  drm/exynos: dp: Modify driver to support drm_panel
  drm/exynos: Move DP setup into commit()
  drm/panel: simple: Add AUO B133HTN01 panel support
  drm/panel: simple: Support delays in panel functions
  drm/panel: simple: Add proper definition for prepare and unprepare
  drm/panel: s6e8aa0: Add proper definition for prepare and unprepare
  drm/panel: ld9040: Add proper definition for prepare and unprepare
  drm/tegra: Add support for panel prepare and unprepare routines
  drm/exynos: dsi: Add support for panel prepare and unprepare routines
  drm/exynos: dpi: Add support for panel prepare and unprepare routines
  drm/panel: simple: Add dummy prepare and unprepare routines
  drm/panel: s6e8aa0: Add dummy prepare and unprepare routines
  drm/panel: ld9040: Add dummy prepare and unprepare routines
  drm/panel: Provide convenience wrapper for .get_modes()
  drm/panel: add .prepare() and .unprepare() functions
  drm/panel: simple: Remove simple-panel compatible
  ...
-rw-r--r--  Documentation/DocBook/drm.tmpl | 89
-rw-r--r--  Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/drm/i2c/tda998x.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/drm/msm/gpu.txt | 52
-rw-r--r--  Documentation/devicetree/bindings/drm/msm/hdmi.txt | 46
-rw-r--r--  Documentation/devicetree/bindings/drm/msm/mdp.txt | 48
-rw-r--r--  Documentation/devicetree/bindings/gpu/st,stih4xx.txt | 189
-rw-r--r--  Documentation/devicetree/bindings/panel/auo,b133htn01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/panel/foxlink,fl500wvr00-a0t.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/panel/innolux,n116bge.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/panel/innolux,n156bge-l21.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/video/exynos_dsim.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/video/exynos_mixer.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/video/samsung-fimd.txt | 28
-rw-r--r--  arch/arm/boot/dts/exynos4.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/exynos5.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/exynos5420.dtsi | 20
-rw-r--r--  drivers/char/agp/frontend.c | 15
-rw-r--r--  drivers/gpu/drm/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/Makefile | 8
-rw-r--r--  drivers/gpu/drm/armada/armada_510.c | 23
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 187
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 11
-rw-r--r--  drivers/gpu/drm/armada/armada_drm.h | 13
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 245
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_output.c | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 16
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c | 7
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 17
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/ptn3460.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 14
-rw-r--r--  drivers/gpu/drm/drm_buffer.c | 6
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 17
-rw-r--r--  drivers/gpu/drm/drm_context.c | 102
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 604
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 182
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 2715
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 1190
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 11
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 115
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 85
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 29
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_info.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 372
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 51
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 1
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 14
-rw-r--r--  drivers/gpu/drm/drm_of.c | 67
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 9
-rw-r--r--  drivers/gpu/drm/drm_rect.c | 140
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 805
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 92
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 117
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 21
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 285
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 277
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 29
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 259
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 38
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 57
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 4
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 390
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 425
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 74
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 189
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 93
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 149
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 308
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 55
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 161
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 104
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 149
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 89
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 624
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 500
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 474
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1459
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 678
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 548
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 154
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 102
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 54
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 160
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 897
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen6.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen7.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen8.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 444
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 90
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 212
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 16
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 58
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 296
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 7
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 5
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 56
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 14
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 239
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 69
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 109
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 27
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 377
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 25
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 431
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 159
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 25
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 91
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 49
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 17
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 31
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 33
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 21
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/panel/panel-ld9040.c | 21
-rw-r--r--  drivers/gpu/drm/panel/panel-s6e8aa0.c | 29
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 203
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 722
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 247
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 178
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 127
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 79
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 77
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 388
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 72
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ib.c | 319
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 63
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 287
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.c | 167
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.h | 71
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 278
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 460
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 172
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 152
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/si_smc.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/sislands_smc.h | 3
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 13
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 4
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sti/Kconfig | 14
-rw-r--r--  drivers/gpu/drm/sti/Makefile | 21
-rw-r--r--  drivers/gpu/drm/sti/NOTES | 58
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 281
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.h | 90
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_crtc.c | 421
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_crtc.h | 22
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_drv.c | 241
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_drv.h | 29
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_plane.c | 195
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_plane.h | 18
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 549
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.h | 16
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 794
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 810
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.h | 88
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c | 336
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c | 211
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.c | 197
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.h | 123
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.c | 249
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.h | 54
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 648
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.c | 138
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.h | 12
-rw-r--r--  drivers/gpu/drm/sti/sti_vtac.c | 215
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c | 366
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.h | 28
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 123
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 5
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 216
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 11
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 66
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 5
-rw-r--r--  drivers/gpu/drm/tegra/gem.h | 16
-rw-r--r--  drivers/gpu/drm/tegra/gr2d.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/gr3d.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 6
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 21
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 15
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.h | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.c | 41
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave.c | 29
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 37
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 34
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 13
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 5
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 8
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 15
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 341
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 74
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 227
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 396
-rw-r--r--  drivers/gpu/host1x/job.c | 22
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 40
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 6
-rw-r--r--  include/drm/drmP.h | 89
-rw-r--r--  include/drm/drm_crtc.h | 36
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 509
-rw-r--r--  include/drm/drm_fb_helper.h | 8
-rw-r--r--  include/drm/drm_mipi_dsi.h | 21
-rw-r--r--  include/drm/drm_of.h | 18
-rw-r--r--  include/drm/drm_panel.h | 58
-rw-r--r--  include/drm/drm_rect.h | 6
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 14
-rw-r--r--  include/linux/host1x.h | 15
-rw-r--r--  include/uapi/drm/drm.h | 2
-rw-r--r--  include/uapi/drm/drm_mode.h | 5
-rw-r--r--  include/uapi/drm/radeon_drm.h | 4
-rw-r--r--  include/uapi/drm/tegra_drm.h | 46
-rw-r--r--  include/video/samsung_fimd.h | 3
348 files changed, 26631 insertions(+), 7461 deletions(-)
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 7df3134ebc0e..1d3756d3176c 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1610,7 +1610,7 @@ int max_width, max_height;</synopsis>
 The connector is then registered with a call to
 <function>drm_connector_init</function> with a pointer to the connector
 functions and a connector type, and exposed through sysfs with a call to
-<function>drm_sysfs_connector_add</function>.
+<function>drm_connector_register</function>.
 </para>
 <para>
 Supported connector types are
@@ -1768,7 +1768,7 @@ int max_width, max_height;</synopsis>
 (<function>drm_encoder_cleanup</function>) and connectors
 (<function>drm_connector_cleanup</function>). Furthermore, connectors
 that have been added to sysfs must be removed by a call to
-<function>drm_sysfs_connector_remove</function> before calling
+<function>drm_connector_unregister</function> before calling
 <function>drm_connector_cleanup</function>.
 </para>
 <para>
@@ -1813,7 +1813,7 @@ void intel_crt_init(struct drm_device *dev)
 	drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);

-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 }]]></programlisting>
 <para>
 In the example above (taken from the i915 driver), a CRTC, connector and
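[ Ed. The three hunks above track a straight API rename:
  drm_sysfs_connector_add()/drm_sysfs_connector_remove() became
  drm_connector_register()/drm_connector_unregister().  A minimal sketch of
  the documented lifecycle follows; the funcs table and the VGA connector
  type are illustrative placeholders, not taken from this patch: ]

	/* Sketch only: assumes the usual <drm/drmP.h>/<drm/drm_crtc.h>
	 * headers; my_connector_funcs is a hypothetical driver vtable. */
	static const struct drm_connector_funcs my_connector_funcs;

	static int example_connector_setup(struct drm_device *dev,
					   struct drm_connector *connector)
	{
		int ret;

		ret = drm_connector_init(dev, connector, &my_connector_funcs,
					 DRM_MODE_CONNECTOR_VGA);
		if (ret)
			return ret;

		/* Previously drm_sysfs_connector_add(). */
		return drm_connector_register(connector);
	}

	static void example_connector_teardown(struct drm_connector *connector)
	{
		/* Unregister from sysfs before freeing internal state;
		 * previously drm_sysfs_connector_remove(). */
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
	}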
@@ -2338,6 +2338,12 @@ void intel_crt_init(struct drm_device *dev)
 !Edrivers/gpu/drm/drm_dp_helper.c
 </sect2>
 <sect2>
+<title>Display Port MST Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
+!Iinclude/drm/drm_dp_mst_helper.h
+!Edrivers/gpu/drm/drm_dp_mst_topology.c
+</sect2>
+<sect2>
 <title>EDID Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_edid.c
 </sect2>
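[ Ed. The new MST helper is driven through a struct drm_dp_mst_topology_mgr
  embedded in the driver's DP state.  A rough bring-up sketch, modelled on
  the i915 usage merged in this pull; the my_dp container and its field
  names are assumptions for illustration, not part of the helper API: ]

	#include <drm/drm_dp_mst_helper.h>

	struct my_dp {				/* stand-in for e.g. intel_dp */
		struct device *kdev;
		struct drm_dp_aux aux;
		struct drm_dp_mst_topology_mgr mst_mgr;
	};

	static const struct drm_dp_mst_topology_cbs example_mst_cbs;

	static int example_mst_init(struct my_dp *dp, int conn_base_id)
	{
		int ret;

		dp->mst_mgr.cbs = &example_mst_cbs;

		/* 16-byte max DPCD transactions, up to 3 payload streams,
		 * matching the values the i915 MST patches here use. */
		ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dp->kdev,
						   &dp->aux, 16, 3,
						   conn_base_id);
		if (ret)
			return ret;

		/* Once the sink reports MST capability, enable the topology. */
		return drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, true);
	}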
@@ -2502,7 +2508,7 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >Description/Restrictions</td>
 </tr>
 <tr>
-<td rowspan="20" valign="top" >DRM</td>
+<td rowspan="21" valign="top" >DRM</td>
 <td rowspan="2" valign="top" >Generic</td>
 <td valign="top" >“EDID”</td>
 <td valign="top" >BLOB | IMMUTABLE</td>
@@ -2633,7 +2639,7 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td rowspan="2" valign="top" >Optional</td>
+<td rowspan="3" valign="top" >Optional</td>
 <td valign="top" >“scaling mode”</td>
 <td valign="top" >ENUM</td>
 <td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
@@ -2641,6 +2647,15 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
+<td valign="top" >"aspect ratio"</td>
+<td valign="top" >ENUM</td>
+<td valign="top" >{ "None", "4:3", "16:9" }</td>
+<td valign="top" >Connector</td>
+<td valign="top" >DRM property to set aspect ratio from user space app.
+This enum is made generic to allow addition of custom aspect
+ratios.</td>
+</tr>
+<tr>
 <td valign="top" >“dirty”</td>
 <td valign="top" >ENUM | IMMUTABLE</td>
 <td valign="top" >{ "Off", "On", "Annotate" }</td>
@@ -2649,7 +2664,7 @@ void intel_crt_init(struct drm_device *dev)
 </tr>
 <tr>
 <td rowspan="21" valign="top" >i915</td>
-<td rowspan="3" valign="top" >Generic</td>
+<td rowspan="2" valign="top" >Generic</td>
 <td valign="top" >"Broadcast RGB"</td>
 <td valign="top" >ENUM</td>
 <td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
@@ -2664,10 +2679,11 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td valign="top" >Standard name as in DRM</td>
-<td valign="top" >Standard type as in DRM</td>
-<td valign="top" >Standard value as in DRM</td>
-<td valign="top" >Standard Object as in DRM</td>
+<td rowspan="1" valign="top" >Plane</td>
+<td valign="top" >“rotation”</td>
+<td valign="top" >BITMASK</td>
+<td valign="top" >{ 0, "rotate-0" }, { 2, "rotate-180" }</td>
+<td valign="top" >Plane</td>
 <td valign="top" >TBD</td>
 </tr>
 <tr>
@@ -2799,8 +2815,8 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td rowspan="3" valign="top" >CDV gma-500</td>
-<td rowspan="3" valign="top" >Generic</td>
+<td rowspan="2" valign="top" >CDV gma-500</td>
+<td rowspan="2" valign="top" >Generic</td>
 <td valign="top" >"Broadcast RGB"</td>
 <td valign="top" >ENUM</td>
 <td valign="top" >{ “Full”, “Limited 16:235” }</td>
@@ -2815,15 +2831,8 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td valign="top" >Standard name as in DRM</td>
-<td valign="top" >Standard type as in DRM</td>
-<td valign="top" >Standard value as in DRM</td>
-<td valign="top" >Standard Object as in DRM</td>
-<td valign="top" >TBD</td>
-</tr>
-<tr>
-<td rowspan="20" valign="top" >Poulsbo</td>
-<td rowspan="2" valign="top" >Generic</td>
+<td rowspan="19" valign="top" >Poulsbo</td>
+<td rowspan="1" valign="top" >Generic</td>
 <td valign="top" >“backlight”</td>
 <td valign="top" >RANGE</td>
 <td valign="top" >Min=0, Max=100</td>
@@ -2831,13 +2840,6 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td valign="top" >Standard name as in DRM</td>
-<td valign="top" >Standard type as in DRM</td>
-<td valign="top" >Standard value as in DRM</td>
-<td valign="top" >Standard Object as in DRM</td>
-<td valign="top" >TBD</td>
-</tr>
-<tr>
 <td rowspan="17" valign="top" >SDVO-TV</td>
 <td valign="top" >“mode”</td>
 <td valign="top" >ENUM</td>
@@ -3064,7 +3066,7 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td rowspan="3" valign="top" >i2c/ch7006_drv</td>
+<td rowspan="2" valign="top" >i2c/ch7006_drv</td>
 <td valign="top" >Generic</td>
 <td valign="top" >“scale”</td>
 <td valign="top" >RANGE</td>
@@ -3073,14 +3075,7 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td rowspan="2" valign="top" >TV</td>
-<td valign="top" >Standard names as in DRM</td>
-<td valign="top" >Standard types as in DRM</td>
-<td valign="top" >Standard Values as in DRM</td>
-<td valign="top" >Standard object as in DRM</td>
-<td valign="top" >TBD</td>
-</tr>
-<tr>
+<td rowspan="1" valign="top" >TV</td>
 <td valign="top" >“mode”</td>
 <td valign="top" >ENUM</td>
 <td valign="top" >{ "PAL", "PAL-M","PAL-N"}, ”PAL-Nc"
@@ -3089,7 +3084,7 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td rowspan="16" valign="top" >nouveau</td>
+<td rowspan="15" valign="top" >nouveau</td>
 <td rowspan="6" valign="top" >NV10 Overlay</td>
 <td valign="top" >"colorkey"</td>
 <td valign="top" >RANGE</td>
@@ -3198,14 +3193,6 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td valign="top" >Generic</td>
-<td valign="top" >Standard name as in DRM</td>
-<td valign="top" >Standard type as in DRM</td>
-<td valign="top" >Standard value as in DRM</td>
-<td valign="top" >Standard Object as in DRM</td>
-<td valign="top" >TBD</td>
-</tr>
-<tr>
 <td rowspan="2" valign="top" >omap</td>
 <td rowspan="2" valign="top" >Generic</td>
 <td valign="top" >“rotation”</td>
@@ -3236,7 +3223,7 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td rowspan="10" valign="top" >radeon</td>
+<td rowspan="9" valign="top" >radeon</td>
 <td valign="top" >DVI-I</td>
 <td valign="top" >“coherent”</td>
 <td valign="top" >RANGE</td>
@@ -3308,14 +3295,6 @@ void intel_crt_init(struct drm_device *dev)
 <td valign="top" >TBD</td>
 </tr>
 <tr>
-<td valign="top" >Generic</td>
-<td valign="top" >Standard name as in DRM</td>
-<td valign="top" >Standard type as in DRM</td>
-<td valign="top" >Standard value as in DRM</td>
-<td valign="top" >Standard Object as in DRM</td>
-<td valign="top" >TBD</td>
-</tr>
-<tr>
 <td rowspan="3" valign="top" >rcar-du</td>
 <td rowspan="3" valign="top" >Generic</td>
 <td valign="top" >"alpha"</td>
diff --git a/Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt b/Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt
new file mode 100644
index 000000000000..46525ea3e646
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/armada/marvell,dove-lcd.txt
@@ -0,0 +1,30 @@
+Device Tree bindings for Armada DRM CRTC driver
+
+Required properties:
+ - compatible: value should be "marvell,dove-lcd".
+ - reg: base address and size of the LCD controller
+ - interrupts: single interrupt number for the LCD controller
+ - port: video output port with endpoints, as described by graph.txt
+
+Optional properties:
+
+ - clocks: as described by clock-bindings.txt
+ - clock-names: as described by clock-bindings.txt
+	"axiclk" - axi bus clock for pixel clock
+	"plldivider" - pll divider clock for pixel clock
+	"ext_ref_clk0" - external clock 0 for pixel clock
+	"ext_ref_clk1" - external clock 1 for pixel clock
+
+Note: all clocks are optional but at least one must be specified.
+Further clocks may be added in the future according to requirements of
+different SoCs.
+
+Example:
+
+	lcd0: lcd-controller@820000 {
+		compatible = "marvell,dove-lcd";
+		reg = <0x820000 0x1000>;
+		interrupts = <47>;
+		clocks = <&si5351 0>;
+		clock-names = "ext_ref_clk_1";
+	};
diff --git a/Documentation/devicetree/bindings/drm/i2c/tda998x.txt b/Documentation/devicetree/bindings/drm/i2c/tda998x.txt
index d7df01c5bb3a..e9e4bce40760 100644
--- a/Documentation/devicetree/bindings/drm/i2c/tda998x.txt
+++ b/Documentation/devicetree/bindings/drm/i2c/tda998x.txt
@@ -3,6 +3,8 @@ Device-Tree bindings for the NXP TDA998x HDMI transmitter
 Required properties;
   - compatible: must be "nxp,tda998x"

+  - reg: I2C address
+
 Optional properties:
   - interrupts: interrupt number and trigger type
 	default: polling
diff --git a/Documentation/devicetree/bindings/drm/msm/gpu.txt b/Documentation/devicetree/bindings/drm/msm/gpu.txt
new file mode 100644
index 000000000000..67d0a58dbb77
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/gpu.txt
@@ -0,0 +1,52 @@
+Qualcomm adreno/snapdragon GPU
+
+Required properties:
+- compatible: "qcom,adreno-3xx"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt signal from the gpu.
+- clocks: device clocks
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required:
+  * "core_clk"
+  * "iface_clk"
+  * "mem_iface_clk"
+- qcom,chipid: gpu chip-id.  Note this may become optional for future
+  devices if we can reliably read the chipid from hw
+- qcom,gpu-pwrlevels: list of operating points
+  - compatible: "qcom,gpu-pwrlevels"
+  - for each qcom,gpu-pwrlevel:
+    - qcom,gpu-freq: requested gpu clock speed
+    - NOTE: downstream android driver defines additional parameters to
+      configure memory bandwidth scaling per OPP.
+
+Example:
+
+/ {
+	...
+
+	gpu: qcom,kgsl-3d0@4300000 {
+		compatible = "qcom,adreno-3xx";
+		reg = <0x04300000 0x20000>;
+		reg-names = "kgsl_3d0_reg_memory";
+		interrupts = <GIC_SPI 80 0>;
+		interrupt-names = "kgsl_3d0_irq";
+		clock-names =
+		    "core_clk",
+		    "iface_clk",
+		    "mem_iface_clk";
+		clocks =
+		    <&mmcc GFX3D_CLK>,
+		    <&mmcc GFX3D_AHB_CLK>,
+		    <&mmcc MMSS_IMEM_AHB_CLK>;
+		qcom,chipid = <0x03020100>;
+		qcom,gpu-pwrlevels {
+			compatible = "qcom,gpu-pwrlevels";
+			qcom,gpu-pwrlevel@0 {
+				qcom,gpu-freq = <450000000>;
+			};
+			qcom,gpu-pwrlevel@1 {
+				qcom,gpu-freq = <27000000>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi.txt b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
new file mode 100644
index 000000000000..aca917fe2ba7
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
@@ -0,0 +1,46 @@
+Qualcomm adreno/snapdragon hdmi output
+
+Required properties:
+- compatible: one of the following
+   * "qcom,hdmi-tx-8660"
+   * "qcom,hdmi-tx-8960"
+- reg: Physical base address and length of the controller's registers
+- reg-names: "core_physical"
+- interrupts: The interrupt signal from the hdmi block.
+- clocks: device clocks
+  See ../clocks/clock-bindings.txt for details.
+- qcom,hdmi-tx-ddc-clk-gpio: ddc clk pin
+- qcom,hdmi-tx-ddc-data-gpio: ddc data pin
+- qcom,hdmi-tx-hpd-gpio: hpd pin
+- core-vdda-supply: phandle to supply regulator
+- hdmi-mux-supply: phandle to mux regulator
+
+Optional properties:
+- qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
+- qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin
+
+Example:
+
+/ {
+	...
+
+	hdmi: qcom,hdmi-tx-8960@4a00000 {
+		compatible = "qcom,hdmi-tx-8960";
+		reg-names = "core_physical";
+		reg = <0x04a00000 0x1000>;
+		interrupts = <GIC_SPI 79 0>;
+		clock-names =
+		    "core_clk",
+		    "master_iface_clk",
+		    "slave_iface_clk";
+		clocks =
+		    <&mmcc HDMI_APP_CLK>,
+		    <&mmcc HDMI_M_AHB_CLK>,
+		    <&mmcc HDMI_S_AHB_CLK>;
+		qcom,hdmi-tx-ddc-clk = <&msmgpio 70 GPIO_ACTIVE_HIGH>;
+		qcom,hdmi-tx-ddc-data = <&msmgpio 71 GPIO_ACTIVE_HIGH>;
+		qcom,hdmi-tx-hpd = <&msmgpio 72 GPIO_ACTIVE_HIGH>;
+		core-vdda-supply = <&pm8921_hdmi_mvs>;
+		hdmi-mux-supply = <&ext_3p3v>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/drm/msm/mdp.txt b/Documentation/devicetree/bindings/drm/msm/mdp.txt
new file mode 100644
index 000000000000..1a0598e5279d
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/mdp.txt
@@ -0,0 +1,48 @@
+Qualcomm adreno/snapdragon display controller
+
+Required properties:
+- compatible:
+  * "qcom,mdp" - mdp4
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt signal from the display controller.
+- connectors: array of phandles for output device(s)
+- clocks: device clocks
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required:
+  * "core_clk"
+  * "iface_clk"
+  * "lut_clk"
+  * "src_clk"
+  * "hdmi_clk"
+  * "mpd_clk"
+
+Optional properties:
+- gpus: phandle for gpu device
+
+Example:
+
+/ {
+	...
+
+	mdp: qcom,mdp@5100000 {
+		compatible = "qcom,mdp";
+		reg = <0x05100000 0xf0000>;
+		interrupts = <GIC_SPI 75 0>;
+		connectors = <&hdmi>;
+		gpus = <&gpu>;
+		clock-names =
+		    "core_clk",
+		    "iface_clk",
+		    "lut_clk",
+		    "src_clk",
+		    "hdmi_clk",
+		    "mdp_clk";
+		clocks =
+		    <&mmcc MDP_SRC>,
+		    <&mmcc MDP_AHB_CLK>,
+		    <&mmcc MDP_LUT_CLK>,
+		    <&mmcc TV_SRC>,
+		    <&mmcc HDMI_TV_CLK>,
+		    <&mmcc MDP_TV_CLK>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
new file mode 100644
index 000000000000..2d150c311a05
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -0,0 +1,189 @@
+STMicroelectronics stih4xx platforms
+
+- sti-vtg: video timing generator
+  Required properties:
+  - compatible: "st,vtg"
+  - reg: Physical base address of the IP registers and length of memory mapped region.
+  Optional properties:
+  - interrupts : VTG interrupt number to the CPU.
+  - st,slave: phandle on a slave vtg
+
+- sti-vtac: video timing advanced inter dye communication Rx and TX
+  Required properties:
+  - compatible: "st,vtac-main" or "st,vtac-aux"
+  - reg: Physical base address of the IP registers and length of memory mapped region.
+  - clocks: from common clock binding: handle hardware IP needed clocks, the
+    number of clocks may depend of the SoC type.
+    See ../clocks/clock-bindings.txt for details.
+  - clock-names: names of the clocks listed in clocks property in the same
+    order.
+
+- sti-display-subsystem: Master device for DRM sub-components
+  This device must be the parent of all the sub-components and is responsible
+  of bind them.
+  Required properties:
+  - compatible: "st,sti-display-subsystem"
+  - ranges: to allow probing of subdevices
+
+- sti-compositor: frame compositor engine
+  must be a child of sti-display-subsystem
+  Required properties:
+  - compatible: "st,stih<chip>-compositor"
+  - reg: Physical base address of the IP registers and length of memory mapped region.
+  - clocks: from common clock binding: handle hardware IP needed clocks, the
+    number of clocks may depend of the SoC type.
+    See ../clocks/clock-bindings.txt for details.
+  - clock-names: names of the clocks listed in clocks property in the same
+    order.
+  - resets: resets to be used by the device
+    See ../reset/reset.txt for details.
+  - reset-names: names of the resets listed in resets property in the same
+    order.
+  - st,vtg: phandle(s) on vtg device (main and aux) nodes.
+
+- sti-tvout: video out hardware block
+  must be a child of sti-display-subsystem
+  Required properties:
+  - compatible: "st,stih<chip>-tvout"
+  - reg: Physical base address of the IP registers and length of memory mapped region.
+  - reg-names: names of the mapped memory regions listed in regs property in
+    the same order.
+  - resets: resets to be used by the device
+    See ../reset/reset.txt for details.
+  - reset-names: names of the resets listed in resets property in the same
+    order.
+  - ranges: to allow probing of subdevices
+
+- sti-hdmi: hdmi output block
+  must be a child of sti-tvout
+  Required properties:
+  - compatible: "st,stih<chip>-hdmi";
+  - reg: Physical base address of the IP registers and length of memory mapped region.
+  - reg-names: names of the mapped memory regions listed in regs property in
+    the same order.
+  - interrupts : HDMI interrupt number to the CPU.
+  - interrupt-names: name of the interrupts listed in interrupts property in
+    the same order
+  - clocks: from common clock binding: handle hardware IP needed clocks, the
+    number of clocks may depend of the SoC type.
+  - clock-names: names of the clocks listed in clocks property in the same
+    order.
+  - hdmi,hpd-gpio: gpio id to detect if an hdmi cable is plugged or not.
+
+sti-hda:
+  Required properties:
+  must be a child of sti-tvout
+  - compatible: "st,stih<chip>-hda"
+  - reg: Physical base address of the IP registers and length of memory mapped region.
+  - reg-names: names of the mapped memory regions listed in regs property in
+    the same order.
+  - clocks: from common clock binding: handle hardware IP needed clocks, the
+    number of clocks may depend of the SoC type.
+    See ../clocks/clock-bindings.txt for details.
+  - clock-names: names of the clocks listed in clocks property in the same
+    order.
+
+Example:
+
+/ {
+	...
+
+	vtg_main_slave: sti-vtg-main-slave@fe85A800 {
+		compatible = "st,vtg";
+		reg = <0xfe85A800 0x300>;
+		interrupts = <GIC_SPI 175 IRQ_TYPE_NONE>;
+	};
+
+	vtg_main: sti-vtg-main-master@fd348000 {
+		compatible = "st,vtg";
+		reg = <0xfd348000 0x400>;
+		st,slave = <&vtg_main_slave>;
+	};
+
+	vtg_aux_slave: sti-vtg-aux-slave@fd348400 {
+		compatible = "st,vtg";
+		reg = <0xfe858200 0x300>;
+		interrupts = <GIC_SPI 176 IRQ_TYPE_NONE>;
+	};
+
+	vtg_aux: sti-vtg-aux-master@fd348400 {
+		compatible = "st,vtg";
+		reg = <0xfd348400 0x400>;
+		st,slave = <&vtg_aux_slave>;
+	};
+
+
+	sti-vtac-rx-main@fee82800 {
+		compatible = "st,vtac-main";
+		reg = <0xfee82800 0x200>;
+		clock-names = "vtac";
+		clocks = <&clk_m_a2_div0 CLK_M_VTAC_MAIN_PHY>;
+	};
+
+	sti-vtac-rx-aux@fee82a00 {
+		compatible = "st,vtac-aux";
+		reg = <0xfee82a00 0x200>;
+		clock-names = "vtac";
+		clocks = <&clk_m_a2_div0 CLK_M_VTAC_AUX_PHY>;
+	};
+
+	sti-vtac-tx-main@fd349000 {
+		compatible = "st,vtac-main";
+		reg = <0xfd349000 0x200>, <0xfd320000 0x10000>;
+		clock-names = "vtac";
+		clocks = <&clk_s_a1_hs CLK_S_VTAC_TX_PHY>;
+	};
+
+	sti-vtac-tx-aux@fd349200 {
+		compatible = "st,vtac-aux";
+		reg = <0xfd349200 0x200>, <0xfd320000 0x10000>;
+		clock-names = "vtac";
+		clocks = <&clk_s_a1_hs CLK_S_VTAC_TX_PHY>;
+	};
+
+	sti-display-subsystem {
+		compatible = "st,sti-display-subsystem";
+		ranges;
+
+		sti-compositor@fd340000 {
+			compatible = "st,stih416-compositor";
+			reg = <0xfd340000 0x1000>;
+			clock-names = "compo_main", "compo_aux",
+				      "pix_main", "pix_aux";
+			clocks = <&clk_m_a2_div1 CLK_M_COMPO_MAIN>, <&clk_m_a2_div1 CLK_M_COMPO_AUX>,
+				 <&clockgen_c_vcc CLK_S_PIX_MAIN>, <&clockgen_c_vcc CLK_S_PIX_AUX>;
+			reset-names = "compo-main", "compo-aux";
+			resets = <&softreset STIH416_COMPO_M_SOFTRESET>, <&softreset STIH416_COMPO_A_SOFTRESET>;
+			st,vtg = <&vtg_main>, <&vtg_aux>;
+		};
+
+		sti-tvout@fe000000 {
+			compatible = "st,stih416-tvout";
+			reg = <0xfe000000 0x1000>, <0xfe85a000 0x400>, <0xfe830000 0x10000>;
+			reg-names = "tvout-reg", "hda-reg", "syscfg";
+			reset-names = "tvout";
+			resets = <&softreset STIH416_HDTVOUT_SOFTRESET>;
+			ranges;
+
+			sti-hdmi@fe85c000 {
+				compatible = "st,stih416-hdmi";
+				reg = <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
+				reg-names = "hdmi-reg", "syscfg";
+				interrupts = <GIC_SPI 173 IRQ_TYPE_NONE>;
+				interrupt-names = "irq";
+				clock-names = "pix", "tmds", "phy", "audio";
+				clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
+				hdmi,hpd-gpio = <&PIO2 5>;
+			};
+
+			sti-hda@fe85a000 {
+				compatible = "st,stih416-hda";
+				reg = <0xfe85a000 0x400>, <0xfe83085c 0x4>;
+				reg-names = "hda-reg", "video-dacs-ctrl";
+				clock-names = "pix", "hddac";
+				clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
+			};
+		};
+	};
+	...
+};
diff --git a/Documentation/devicetree/bindings/panel/auo,b133htn01.txt b/Documentation/devicetree/bindings/panel/auo,b133htn01.txt
new file mode 100644
index 000000000000..302226b5bb55
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b133htn01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
+
+Required properties:
+- compatible: should be "auo,b133htn01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/foxlink,fl500wvr00-a0t.txt b/Documentation/devicetree/bindings/panel/foxlink,fl500wvr00-a0t.txt
new file mode 100644
index 000000000000..b47f9d87bc19
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/foxlink,fl500wvr00-a0t.txt
@@ -0,0 +1,7 @@
+Foxlink Group 5" WVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "foxlink,fl500wvr00-a0t"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,n116bge.txt b/Documentation/devicetree/bindings/panel/innolux,n116bge.txt
new file mode 100644
index 000000000000..081bb939ed31
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/innolux,n116bge.txt
@@ -0,0 +1,7 @@
+Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,n116bge"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,n156bge-l21.txt b/Documentation/devicetree/bindings/panel/innolux,n156bge-l21.txt
new file mode 100644
index 000000000000..7825844aafdf
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/innolux,n156bge-l21.txt
@@ -0,0 +1,7 @@
+InnoLux 15.6" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,n156bge-l21"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/video/exynos_dsim.txt b/Documentation/devicetree/bindings/video/exynos_dsim.txt
index 33b5730d07ba..31036c667d54 100644
--- a/Documentation/devicetree/bindings/video/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/video/exynos_dsim.txt
@@ -1,7 +1,9 @@
 Exynos MIPI DSI Master

 Required properties:
-  - compatible: "samsung,exynos4210-mipi-dsi"
+  - compatible: value should be one of the following
+		"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
+		"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
   - reg: physical base address and length of the registers set for the device
   - interrupts: should contain DSI interrupt
   - clocks: list of clock specifiers, must contain an entry for each required
diff --git a/Documentation/devicetree/bindings/video/exynos_mixer.txt b/Documentation/devicetree/bindings/video/exynos_mixer.txt
index 7bfde9c9d658..08b394b1edbf 100644
--- a/Documentation/devicetree/bindings/video/exynos_mixer.txt
+++ b/Documentation/devicetree/bindings/video/exynos_mixer.txt
@@ -4,8 +4,9 @@ Required properties:
 - compatible: value should be one of the following:
 	1) "samsung,exynos5-mixer" <DEPRECATED>
 	2) "samsung,exynos4210-mixer"
-	3) "samsung,exynos5250-mixer"
-	4) "samsung,exynos5420-mixer"
+	3) "samsung,exynos4212-mixer"
+	4) "samsung,exynos5250-mixer"
+	5) "samsung,exynos5420-mixer"

 - reg: physical base address of the mixer and length of memory mapped
   region.
diff --git a/Documentation/devicetree/bindings/video/samsung-fimd.txt b/Documentation/devicetree/bindings/video/samsung-fimd.txt
index 2dad41b689af..8428fcff8037 100644
--- a/Documentation/devicetree/bindings/video/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/video/samsung-fimd.txt
@@ -44,6 +44,34 @@ Optional Properties:
 - display-timings: timing settings for FIMD, as described in document [1].
 		Can be used in case timings cannot be provided otherwise
 		or to override timings provided by the panel.
+- samsung,sysreg: handle to syscon used to control the system registers
+- i80-if-timings: timing configuration for lcd i80 interface support.
+  - cs-setup: clock cycles for the active period of address signal is enabled
+	      until chip select is enabled.
+	      If not specified, the default value(0) will be used.
+  - wr-setup: clock cycles for the active period of CS signal is enabled until
+	      write signal is enabled.
+	      If not specified, the default value(0) will be used.
+  - wr-active: clock cycles for the active period of CS is enabled.
+	       If not specified, the default value(1) will be used.
+  - wr-hold: clock cycles for the active period of CS is disabled until write
+	     signal is disabled.
+	     If not specified, the default value(0) will be used.
+
+  The parameters are defined as:
+
+  VCLK(internal)  __|¯¯¯¯¯¯|_____|¯¯¯¯¯¯|_____|¯¯¯¯¯¯|_____|¯¯¯¯¯¯|_____|¯¯
+                    :           :            :            :            :
+  Address Output  --:<XXXXXXXXXXX:XXXXXXXXXXXX:XXXXXXXXXXXX:XXXXXXXXXXXX:XX
+                    | cs-setup+1 |            :            :            :
+                    |<---------->|            :            :            :
+  Chip Select     ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|____________:____________:____________|¯¯
+                                 | wr-setup+1 |            | wr-hold+1 |
+                                 |<---------->|            |<--------->|
+  Write Enable    ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|____________|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯
+                                              | wr-active+1|
+                                              |<---------->|
+  Video Data      ----------------------------<XXXXXXXXXXXXXXXXXXXXXXXXX>--

 The device node can contain 'port' child nodes according to the bindings defined
 in [2]. The following are properties specific to those nodes:
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 93bcc1fe8a4e..bd3b9b537976 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -612,6 +612,7 @@
 		clocks = <&clock CLK_SCLK_FIMD0>, <&clock CLK_FIMD0>;
 		clock-names = "sclk_fimd", "fimd";
 		samsung,power-domain = <&pd_lcd0>;
+		samsung,sysreg = <&sys_reg>;
 		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index ff2d2cb0f79e..a0cc0b6f8f96 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -94,6 +94,7 @@
 		reg = <0x14400000 0x40000>;
 		interrupt-names = "fifo", "vsync", "lcd_sys";
 		interrupts = <18 4>, <18 5>, <18 6>;
+		samsung,sysreg = <&sysreg_system_controller>;
 		status = "disabled";
 	};

diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index a40a5c2b5a4f..08dd681c0019 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -520,6 +520,26 @@
 		phy-names = "dp";
 	};

+	mipi_phy: video-phy@10040714 {
+		compatible = "samsung,s5pv210-mipi-video-phy";
+		reg = <0x10040714 12>;
+		#phy-cells = <1>;
+	};
+
+	dsi@14500000 {
+		compatible = "samsung,exynos5410-mipi-dsi";
+		reg = <0x14500000 0x10000>;
+		interrupts = <0 82 0>;
+		samsung,power-domain = <&disp_pd>;
+		phys = <&mipi_phy 1>;
+		phy-names = "dsim";
+		clocks = <&clock CLK_DSIM1>, <&clock CLK_SCLK_MIPI1>;
+		clock-names = "bus_clk", "pll_clk";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
 	fimd: fimd@14400000 {
 		samsung,power-domain = <&disp_pd>;
 		clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>;
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index b29703324e94..09f17eb73486 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -710,19 +710,6 @@ static int agp_open(struct inode *inode, struct file *file)
 	return 0;
 }

-
-static ssize_t agp_read(struct file *file, char __user *buf,
-			size_t count, loff_t * ppos)
-{
-	return -EINVAL;
-}
-
-static ssize_t agp_write(struct file *file, const char __user *buf,
-			 size_t count, loff_t * ppos)
-{
-	return -EINVAL;
-}
-
 static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
 {
 	struct agp_info userinfo;
@@ -1047,8 +1034,6 @@ static const struct file_operations agp_fops =
 {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
-	.read		= agp_read,
-	.write		= agp_write,
 	.unlocked_ioctl	= agp_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= compat_agp_ioctl,
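[ Ed. Dropping the agp_read()/agp_write() stubs is behaviour-preserving:
  when a file_operations provides no read/write method, the VFS already
  fails the syscall with -EINVAL, which is exactly what the stubs returned.
  A simplified sketch of that VFS check, condensed for illustration and not
  code from this patch: ]

	/* Condensed from vfs_read() of this era: with neither ->read nor
	 * ->aio_read set, the syscall fails before reaching the driver. */
	ssize_t vfs_read_sketch(struct file *file, char __user *buf,
				size_t count, loff_t *pos)
	{
		if (!(file->f_mode & FMODE_READ))
			return -EBADF;
		if (!file->f_op->read && !file->f_op->aio_read)
			return -EINVAL;	/* the case the AGP stubs duplicated */
		/* ... permission checks and the actual ->read() call ... */
		return file->f_op->read(file, buf, count, pos);
	}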
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f5120046ff80..b066bb3ca01a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -114,6 +114,7 @@ config DRM_RADEON
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select INTERVAL_TREE
 	help
 	  Choose this option if you have an ATI Radeon graphics card.  There
 	  are both PCI and AGP versions.  You don't need to choose this to
@@ -201,3 +202,5 @@ source "drivers/gpu/drm/msm/Kconfig"
201source "drivers/gpu/drm/tegra/Kconfig" 202source "drivers/gpu/drm/tegra/Kconfig"
202 203
203source "drivers/gpu/drm/panel/Kconfig" 204source "drivers/gpu/drm/panel/Kconfig"
205
206source "drivers/gpu/drm/sti/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index dd2ba4269740..4a55d59ccd22 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -6,8 +6,8 @@ ccflags-y := -Iinclude/drm
 
 drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_context.o drm_dma.o \
-		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
-		drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
+		drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+		drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o drm_pci.o \
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
@@ -20,11 +20,12 @@ drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
+drm-$(CONFIG_OF) += drm_of.o
 
 drm-usb-y := drm_usb.o
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
-		drm_plane_helper.o
+		drm_plane_helper.o drm_dp_mst_topology.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_DRM_QXL) += qxl/
 obj-$(CONFIG_DRM_BOCHS) += bochs/
 obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
+obj-$(CONFIG_DRM_STI) += sti/
 obj-y += i2c/
 obj-y += panel/
 obj-y += bridge/
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 59948eff6095..ad3d2ebf95c9 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -15,20 +15,19 @@
 #include "armada_drm.h"
 #include "armada_hw.h"
 
-static int armada510_init(struct armada_private *priv, struct device *dev)
+static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
 {
-	priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+	struct clk *clk;
 
-	if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
-		priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+	clk = devm_clk_get(dev, "ext_ref_clk1");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk) == -ENOENT ? -EPROBE_DEFER : PTR_ERR(clk);
 
-	return PTR_RET(priv->extclk[0]);
-}
+	dcrtc->extclk[0] = clk;
 
-static int armada510_crtc_init(struct armada_crtc *dcrtc)
-{
 	/* Lower the watermark so to eliminate jitter at higher bandwidths */
 	armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+
 	return 0;
 }
 
@@ -45,8 +44,7 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc)
 static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
 	const struct drm_display_mode *mode, uint32_t *sclk)
 {
-	struct armada_private *priv = dcrtc->crtc.dev->dev_private;
-	struct clk *clk = priv->extclk[0];
+	struct clk *clk = dcrtc->extclk[0];
 	int ret;
 
 	if (dcrtc->num == 1)
@@ -81,7 +79,6 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
 const struct armada_variant armada510_ops = {
 	.has_spu_adv_reg = true,
 	.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
-	.init = armada510_init,
-	.crtc_init = armada510_crtc_init,
-	.crtc_compute_clock = armada510_crtc_compute_clock,
+	.init = armada510_crtc_init,
+	.compute_clock = armada510_crtc_compute_clock,
 };
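The rewritten init folds the probe-deferral dance into one expression: a clock that is not registered yet comes back as -ENOENT, which the driver maps to -EPROBE_DEFER so the core retries once the clock provider appears; any other error passes through. The same idiom in isolation, with a made-up clock name:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /* Sketch of the deferral idiom; "example_clk" is illustrative. */
    static int example_get_clock(struct device *dev, struct clk **out)
    {
        struct clk *clk = devm_clk_get(dev, "example_clk");

        if (IS_ERR(clk))
            return PTR_ERR(clk) == -ENOENT ? -EPROBE_DEFER : PTR_ERR(clk);

        *out = clk;
        return 0;
    }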
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3aedf9e993e6..9a0cc09e6653 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -7,6 +7,9 @@
  * published by the Free Software Foundation.
  */
 #include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include "armada_crtc.h"
@@ -332,24 +335,23 @@ static void armada_drm_crtc_commit(struct drm_crtc *crtc)
 static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
 	const struct drm_display_mode *mode, struct drm_display_mode *adj)
 {
-	struct armada_private *priv = crtc->dev->dev_private;
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 	int ret;
 
 	/* We can't do interlaced modes if we don't have the SPU_ADV_REG */
-	if (!priv->variant->has_spu_adv_reg &&
+	if (!dcrtc->variant->has_spu_adv_reg &&
 	    adj->flags & DRM_MODE_FLAG_INTERLACE)
 		return false;
 
 	/* Check whether the display mode is possible */
-	ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+	ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL);
 	if (ret)
 		return false;
 
 	return true;
 }
 
-void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
 {
 	struct armada_vbl_event *e, *n;
 	void __iomem *base = dcrtc->base;
@@ -410,6 +412,27 @@ void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
 	}
 }
 
+static irqreturn_t armada_drm_irq(int irq, void *arg)
+{
+	struct armada_crtc *dcrtc = arg;
+	u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+
+	/*
+	 * This is ridiculous - rather than writing bits to clear, we
+	 * have to set the actual status register value. This is racy.
+	 */
+	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+	/* Mask out those interrupts we haven't enabled */
+	v = stat & dcrtc->irq_ena;
+
+	if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+		armada_drm_crtc_irq(dcrtc, stat);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
 /* These are locked by dev->vbl_lock */
 void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
 {
@@ -470,7 +493,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 	struct drm_display_mode *mode, struct drm_display_mode *adj,
 	int x, int y, struct drm_framebuffer *old_fb)
 {
-	struct armada_private *priv = crtc->dev->dev_private;
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 	struct armada_regs regs[17];
 	uint32_t lm, rm, tm, bm, val, sclk;
@@ -515,7 +537,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	/* Now compute the divider for real */
-	priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+	dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
 
 	/* Ensure graphic fifo is enabled */
 	armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
@@ -537,7 +559,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 	dcrtc->v[1].spu_v_porch = tm << 16 | bm;
 	val = adj->crtc_hsync_start;
 	dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
-		priv->variant->spu_adv_reg;
+		dcrtc->variant->spu_adv_reg;
 
 	if (interlaced) {
 		/* Odd interlaced frame */
@@ -546,7 +568,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 		dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
 		val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
 		dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
-			priv->variant->spu_adv_reg;
+			dcrtc->variant->spu_adv_reg;
 	} else {
 		dcrtc->v[0] = dcrtc->v[1];
 	}
@@ -561,7 +583,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
 			   LCD_SPUT_V_H_TOTAL);
 
-	if (priv->variant->has_spu_adv_reg) {
+	if (dcrtc->variant->has_spu_adv_reg) {
 		armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
 				     ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
 				     ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
@@ -805,12 +827,11 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-	struct armada_private *priv = crtc->dev->dev_private;
 	struct armada_gem_object *obj = NULL;
 	int ret;
 
 	/* If no cursor support, replicate drm's return value */
-	if (!priv->variant->has_spu_adv_reg)
+	if (!dcrtc->variant->has_spu_adv_reg)
 		return -ENXIO;
 
 	if (handle && w > 0 && h > 0) {
@@ -858,11 +879,10 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
 	struct drm_device *dev = crtc->dev;
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-	struct armada_private *priv = crtc->dev->dev_private;
 	int ret;
 
 	/* If no cursor support, replicate drm's return value */
-	if (!priv->variant->has_spu_adv_reg)
+	if (!dcrtc->variant->has_spu_adv_reg)
 		return -EFAULT;
 
 	mutex_lock(&dev->struct_mutex);
@@ -888,6 +908,10 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
 	if (!IS_ERR(dcrtc->clk))
 		clk_disable_unprepare(dcrtc->clk);
 
+	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+
+	of_node_put(dcrtc->crtc.port);
+
 	kfree(dcrtc);
 }
 
@@ -1027,19 +1051,20 @@ static int armada_drm_crtc_create_properties(struct drm_device *dev)
 	return 0;
 }
 
-int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
-	struct resource *res)
+int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
+	struct resource *res, int irq, const struct armada_variant *variant,
+	struct device_node *port)
 {
-	struct armada_private *priv = dev->dev_private;
+	struct armada_private *priv = drm->dev_private;
 	struct armada_crtc *dcrtc;
 	void __iomem *base;
 	int ret;
 
-	ret = armada_drm_crtc_create_properties(dev);
+	ret = armada_drm_crtc_create_properties(drm);
 	if (ret)
 		return ret;
 
-	base = devm_ioremap_resource(dev->dev, res);
+	base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
@@ -1049,8 +1074,12 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
 		return -ENOMEM;
 	}
 
+	if (dev != drm->dev)
+		dev_set_drvdata(dev, dcrtc);
+
+	dcrtc->variant = variant;
 	dcrtc->base = base;
-	dcrtc->num = num;
+	dcrtc->num = drm->mode_config.num_crtc;
 	dcrtc->clk = ERR_PTR(-EINVAL);
 	dcrtc->csc_yuv_mode = CSC_AUTO;
 	dcrtc->csc_rgb_mode = CSC_AUTO;
@@ -1072,9 +1101,18 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
 		CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
 	writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
 	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+	writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+	ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
+			       dcrtc);
+	if (ret < 0) {
+		kfree(dcrtc);
+		return ret;
+	}
 
-	if (priv->variant->crtc_init) {
-		ret = priv->variant->crtc_init(dcrtc);
+	if (dcrtc->variant->init) {
+		ret = dcrtc->variant->init(dcrtc, dev);
 		if (ret) {
 			kfree(dcrtc);
 			return ret;
@@ -1086,7 +1124,8 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
 
 	priv->dcrtc[dcrtc->num] = dcrtc;
 
-	drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+	dcrtc->crtc.port = port;
+	drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs);
 	drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
 
 	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
@@ -1094,5 +1133,107 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
 	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
 				   dcrtc->csc_rgb_mode);
 
-	return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+	return armada_overlay_plane_create(drm, 1 << dcrtc->num);
+}
+
+static int
+armada_lcd_bind(struct device *dev, struct device *master, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *drm = data;
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	int irq = platform_get_irq(pdev, 0);
+	const struct armada_variant *variant;
+	struct device_node *port = NULL;
+
+	if (irq < 0)
+		return irq;
+
+	if (!dev->of_node) {
+		const struct platform_device_id *id;
+
+		id = platform_get_device_id(pdev);
+		if (!id)
+			return -ENXIO;
+
+		variant = (const struct armada_variant *)id->driver_data;
+	} else {
+		const struct of_device_id *match;
+		struct device_node *np, *parent = dev->of_node;
+
+		match = of_match_device(dev->driver->of_match_table, dev);
+		if (!match)
+			return -ENXIO;
+
+		np = of_get_child_by_name(parent, "ports");
+		if (np)
+			parent = np;
+		port = of_get_child_by_name(parent, "port");
+		of_node_put(np);
+		if (!port) {
+			dev_err(dev, "no port node found in %s\n",
+				parent->full_name);
+			return -ENXIO;
+		}
+
+		variant = match->data;
+	}
+
+	return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
+}
+
+static void
+armada_lcd_unbind(struct device *dev, struct device *master, void *data)
+{
+	struct armada_crtc *dcrtc = dev_get_drvdata(dev);
+
+	armada_drm_crtc_destroy(&dcrtc->crtc);
 }
+
+static const struct component_ops armada_lcd_ops = {
+	.bind = armada_lcd_bind,
+	.unbind = armada_lcd_unbind,
+};
+
+static int armada_lcd_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &armada_lcd_ops);
+}
+
+static int armada_lcd_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &armada_lcd_ops);
+	return 0;
+}
+
+static struct of_device_id armada_lcd_of_match[] = {
+	{
+		.compatible = "marvell,dove-lcd",
+		.data = &armada510_ops,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, armada_lcd_of_match);
+
+static const struct platform_device_id armada_lcd_platform_ids[] = {
+	{
+		.name = "armada-lcd",
+		.driver_data = (unsigned long)&armada510_ops,
+	}, {
+		.name = "armada-510-lcd",
+		.driver_data = (unsigned long)&armada510_ops,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
+
+struct platform_driver armada_lcd_platform_driver = {
+	.probe = armada_lcd_probe,
+	.remove = armada_lcd_remove,
+	.driver = {
+		.name = "armada-lcd",
+		.owner = THIS_MODULE,
+		.of_match_table = armada_lcd_of_match,
+	},
+	.id_table = armada_lcd_platform_ids,
+};
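Each LCD controller is now a component child: probe merely registers with the component framework, and the real setup runs at bind time once the DRM master is assembling. The skeleton of that pattern, reduced to its moving parts (all example_* names are hypothetical):

    #include <linux/component.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    /* Hypothetical component child: real work happens at bind time. */
    static int example_bind(struct device *dev, struct device *master,
                            void *data)
    {
        /* 'data' is whatever the master passed to component_bind_all() */
        return 0;
    }

    static void example_unbind(struct device *dev, struct device *master,
                               void *data)
    {
    }

    static const struct component_ops example_ops = {
        .bind = example_bind,
        .unbind = example_unbind,
    };

    static int example_probe(struct platform_device *pdev)
    {
        return component_add(&pdev->dev, &example_ops);
    }

    static int example_remove(struct platform_device *pdev)
    {
        component_del(&pdev->dev, &example_ops);
        return 0;
    }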
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 9c10a07e7492..98102a5a9af5 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -32,12 +32,15 @@ struct armada_regs {
 	armada_reg_queue_mod(_r, _i, 0, 0, ~0)
 
 struct armada_frame_work;
+struct armada_variant;
 
 struct armada_crtc {
 	struct drm_crtc crtc;
+	const struct armada_variant *variant;
 	unsigned num;
 	void __iomem *base;
 	struct clk *clk;
+	struct clk *extclk[2];
 	struct {
 		uint32_t spu_v_h_total;
 		uint32_t spu_v_porch;
@@ -72,12 +75,16 @@ struct armada_crtc {
 };
 #define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
 
-int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+struct device_node;
+int armada_drm_crtc_create(struct drm_device *, struct device *,
+	struct resource *, int, const struct armada_variant *,
+	struct device_node *);
 void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
 void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
-void armada_drm_crtc_irq(struct armada_crtc *, u32);
 void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
 void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
 void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
 
+extern struct platform_driver armada_lcd_platform_driver;
+
 #endif
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index a72cae03b99b..ea63c6c7c66f 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -59,26 +59,23 @@ void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
 struct armada_private;
 
 struct armada_variant {
 	bool has_spu_adv_reg;
 	uint32_t spu_adv_reg;
-	int (*init)(struct armada_private *, struct device *);
-	int (*crtc_init)(struct armada_crtc *);
-	int (*crtc_compute_clock)(struct armada_crtc *,
-		const struct drm_display_mode *,
-		uint32_t *);
+	int (*init)(struct armada_crtc *, struct device *);
+	int (*compute_clock)(struct armada_crtc *,
+		const struct drm_display_mode *,
+		uint32_t *);
 };
 
 /* Variant ops */
 extern const struct armada_variant armada510_ops;
 
 struct armada_private {
-	const struct armada_variant *variant;
 	struct work_struct fb_unref_work;
 	DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
 	struct drm_fb_helper *fbdev;
 	struct armada_crtc *dcrtc[2];
 	struct drm_mm linear;
-	struct clk *extclk[2];
 	struct drm_property *csc_yuv_prop;
 	struct drm_property *csc_rgb_prop;
 	struct drm_property *colorkey_prop;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 8ab3cd1a8cdb..e2d5792b140f 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -6,7 +6,9 @@
  * published by the Free Software Foundation.
  */
 #include <linux/clk.h>
+#include <linux/component.h>
 #include <linux/module.h>
+#include <linux/of_graph.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include "armada_crtc.h"
@@ -52,6 +54,11 @@ static const struct armada_drm_slave_config tda19988_config = {
 };
 #endif
 
+static bool is_componentized(struct device *dev)
+{
+	return dev->of_node || dev->platform_data;
+}
+
 static void armada_drm_unref_work(struct work_struct *work)
 {
 	struct armada_private *priv =
@@ -85,6 +92,7 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
 static int armada_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	const struct platform_device_id *id;
+	const struct armada_variant *variant;
 	struct armada_private *priv;
 	struct resource *res[ARRAY_SIZE(priv->dcrtc)];
 	struct resource *mem = NULL;
@@ -107,7 +115,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
 			return -EINVAL;
 	}
 
-	if (!res[0] || !mem)
+	if (!mem)
 		return -ENXIO;
 
 	if (!devm_request_mem_region(dev->dev, mem->start,
@@ -128,11 +136,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
 	if (!id)
 		return -ENXIO;
 
-	priv->variant = (struct armada_variant *)id->driver_data;
-
-	ret = priv->variant->init(priv, dev->dev);
-	if (ret)
-		return ret;
+	variant = (const struct armada_variant *)id->driver_data;
 
 	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
 	INIT_KFIFO(priv->fb_unref);
@@ -155,40 +159,50 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
 
 	/* Create all LCD controllers */
 	for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+		int irq;
+
 		if (!res[n])
 			break;
 
-		ret = armada_drm_crtc_create(dev, n, res[n]);
+		irq = platform_get_irq(dev->platformdev, n);
+		if (irq < 0)
+			goto err_kms;
+
+		ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
+					     variant, NULL);
 		if (ret)
 			goto err_kms;
 	}
 
+	if (is_componentized(dev->dev)) {
+		ret = component_bind_all(dev->dev, dev);
+		if (ret)
+			goto err_kms;
+	} else {
 #ifdef CONFIG_DRM_ARMADA_TDA1998X
 	ret = armada_drm_connector_slave_create(dev, &tda19988_config);
 	if (ret)
 		goto err_kms;
 #endif
+	}
 
-	ret = drm_vblank_init(dev, n);
-	if (ret)
-		goto err_kms;
-
-	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
+	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
 	if (ret)
-		goto err_kms;
+		goto err_comp;
 
 	dev->vblank_disable_allowed = 1;
 
 	ret = armada_fbdev_init(dev);
 	if (ret)
-		goto err_irq;
+		goto err_comp;
 
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
 
- err_irq:
-	drm_irq_uninstall(dev);
+ err_comp:
+	if (is_componentized(dev->dev))
+		component_unbind_all(dev->dev, dev);
  err_kms:
 	drm_mode_config_cleanup(dev);
 	drm_mm_takedown(&priv->linear);
@@ -203,7 +217,10 @@ static int armada_drm_unload(struct drm_device *dev)
 
 	drm_kms_helper_poll_fini(dev);
 	armada_fbdev_fini(dev);
-	drm_irq_uninstall(dev);
+
+	if (is_componentized(dev->dev))
+		component_unbind_all(dev->dev, dev);
+
 	drm_mode_config_cleanup(dev);
 	drm_mm_takedown(&priv->linear);
 	flush_work(&priv->fb_unref_work);
@@ -259,52 +276,6 @@ static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
 	armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
 }
 
-static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
-{
-	struct drm_device *dev = arg;
-	struct armada_private *priv = dev->dev_private;
-	struct armada_crtc *dcrtc = priv->dcrtc[0];
-	uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
-	irqreturn_t handled = IRQ_NONE;
-
-	/*
-	 * This is ridiculous - rather than writing bits to clear, we
-	 * have to set the actual status register value. This is racy.
-	 */
-	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
-
-	/* Mask out those interrupts we haven't enabled */
-	v = stat & dcrtc->irq_ena;
-
-	if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
-		armada_drm_crtc_irq(dcrtc, stat);
-		handled = IRQ_HANDLED;
-	}
-
-	return handled;
-}
-
-static int armada_drm_irq_postinstall(struct drm_device *dev)
-{
-	struct armada_private *priv = dev->dev_private;
-	struct armada_crtc *dcrtc = priv->dcrtc[0];
-
-	spin_lock_irq(&dev->vbl_lock);
-	writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
-	writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
-	spin_unlock_irq(&dev->vbl_lock);
-
-	return 0;
-}
-
-static void armada_drm_irq_uninstall(struct drm_device *dev)
-{
-	struct armada_private *priv = dev->dev_private;
-	struct armada_crtc *dcrtc = priv->dcrtc[0];
-
-	writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
-}
-
 static struct drm_ioctl_desc armada_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
 		DRM_UNLOCKED),
@@ -340,9 +311,6 @@ static struct drm_driver armada_drm_driver = {
 	.get_vblank_counter = drm_vblank_count,
 	.enable_vblank = armada_drm_enable_vblank,
 	.disable_vblank = armada_drm_disable_vblank,
-	.irq_handler = armada_drm_irq_handler,
-	.irq_postinstall = armada_drm_irq_postinstall,
-	.irq_uninstall = armada_drm_irq_uninstall,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init = armada_drm_debugfs_init,
 	.debugfs_cleanup = armada_drm_debugfs_cleanup,
@@ -362,19 +330,140 @@ static struct drm_driver armada_drm_driver = {
 	.desc = "Armada SoC DRM",
 	.date = "20120730",
 	.driver_features = DRIVER_GEM | DRIVER_MODESET |
-			   DRIVER_HAVE_IRQ | DRIVER_PRIME,
+			   DRIVER_PRIME,
 	.ioctls = armada_ioctls,
 	.fops = &armada_drm_fops,
 };
 
+static int armada_drm_bind(struct device *dev)
+{
+	return drm_platform_init(&armada_drm_driver, to_platform_device(dev));
+}
+
+static void armada_drm_unbind(struct device *dev)
+{
+	drm_put_dev(dev_get_drvdata(dev));
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int compare_dev_name(struct device *dev, void *data)
+{
+	const char *name = data;
+	return !strcmp(dev_name(dev), name);
+}
+
+static void armada_add_endpoints(struct device *dev,
+	struct component_match **match, struct device_node *port)
+{
+	struct device_node *ep, *remote;
+
+	for_each_child_of_node(port, ep) {
+		remote = of_graph_get_remote_port_parent(ep);
+		if (!remote || !of_device_is_available(remote)) {
+			of_node_put(remote);
+			continue;
+		} else if (!of_device_is_available(remote->parent)) {
+			dev_warn(dev, "parent device of %s is not available\n",
+				 remote->full_name);
+			of_node_put(remote);
+			continue;
+		}
+
+		component_match_add(dev, match, compare_of, remote);
+		of_node_put(remote);
+	}
+}
+
+static int armada_drm_find_components(struct device *dev,
+	struct component_match **match)
+{
+	struct device_node *port;
+	int i;
+
+	if (dev->of_node) {
+		struct device_node *np = dev->of_node;
+
+		for (i = 0; ; i++) {
+			port = of_parse_phandle(np, "ports", i);
+			if (!port)
+				break;
+
+			component_match_add(dev, match, compare_of, port);
+			of_node_put(port);
+		}
+
+		if (i == 0) {
+			dev_err(dev, "missing 'ports' property\n");
+			return -ENODEV;
+		}
+
+		for (i = 0; ; i++) {
+			port = of_parse_phandle(np, "ports", i);
+			if (!port)
+				break;
+
+			armada_add_endpoints(dev, match, port);
+			of_node_put(port);
+		}
+	} else if (dev->platform_data) {
+		char **devices = dev->platform_data;
+		struct device *d;
+
+		for (i = 0; devices[i]; i++)
+			component_match_add(dev, match, compare_dev_name,
+					    devices[i]);
+
+		if (i == 0) {
+			dev_err(dev, "missing 'ports' property\n");
+			return -ENODEV;
+		}
+
+		for (i = 0; devices[i]; i++) {
+			d = bus_find_device_by_name(&platform_bus_type, NULL,
+						    devices[i]);
+			if (d && d->of_node) {
+				for_each_child_of_node(d->of_node, port)
+					armada_add_endpoints(dev, match, port);
+			}
+			put_device(d);
+		}
+	}
+
+	return 0;
+}
+
+static const struct component_master_ops armada_master_ops = {
+	.bind = armada_drm_bind,
+	.unbind = armada_drm_unbind,
+};
+
 static int armada_drm_probe(struct platform_device *pdev)
 {
-	return drm_platform_init(&armada_drm_driver, pdev);
+	if (is_componentized(&pdev->dev)) {
+		struct component_match *match = NULL;
+		int ret;
+
+		ret = armada_drm_find_components(&pdev->dev, &match);
+		if (ret < 0)
+			return ret;
+
+		return component_master_add_with_match(&pdev->dev,
+				&armada_master_ops, match);
+	} else {
+		return drm_platform_init(&armada_drm_driver, pdev);
+	}
 }
 
 static int armada_drm_remove(struct platform_device *pdev)
 {
-	drm_put_dev(platform_get_drvdata(pdev));
+	if (is_componentized(&pdev->dev))
+		component_master_del(&pdev->dev, &armada_master_ops);
+	else
+		drm_put_dev(platform_get_drvdata(pdev));
 	return 0;
 }
 
@@ -402,14 +491,24 @@ static struct platform_driver armada_drm_platform_driver = {
 
 static int __init armada_drm_init(void)
 {
+	int ret;
+
 	armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
-	return platform_driver_register(&armada_drm_platform_driver);
+
+	ret = platform_driver_register(&armada_lcd_platform_driver);
+	if (ret)
+		return ret;
+	ret = platform_driver_register(&armada_drm_platform_driver);
+	if (ret)
+		platform_driver_unregister(&armada_lcd_platform_driver);
+	return ret;
 }
 module_init(armada_drm_init);
 
 static void __exit armada_drm_exit(void)
 {
 	platform_driver_unregister(&armada_drm_platform_driver);
+	platform_driver_unregister(&armada_lcd_platform_driver);
 }
 module_exit(armada_drm_exit);
 
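On the master side, probe only collects a match list; the framework calls .bind once every matched child has registered, and the master in turn brings the children up via component_bind_all() from its load path. A pared-down sketch of that half of the handshake (example_* names hypothetical):

    #include <linux/component.h>
    #include <linux/platform_device.h>

    static int example_compare_of(struct device *dev, void *data)
    {
        return dev->of_node == data;
    }

    static int example_master_bind(struct device *dev)
    {
        /* All matched components exist: create the real device here,
         * then call component_bind_all() from its load path. */
        return 0;
    }

    static void example_master_unbind(struct device *dev)
    {
    }

    static const struct component_master_ops example_master_ops = {
        .bind = example_master_bind,
        .unbind = example_master_unbind,
    };

    static int example_master_probe(struct platform_device *pdev)
    {
        struct component_match *match = NULL;

        /* One entry per child we expect to bind (illustrative). */
        component_match_add(&pdev->dev, &match, example_compare_of,
                            pdev->dev.of_node);

        return component_master_add_with_match(&pdev->dev,
                                               &example_master_ops, match);
    }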
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index fd166f532ab9..7838e731b0de 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -131,7 +131,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
 	return ret;
 }
 
-static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
 	.gamma_set = armada_drm_crtc_gamma_set,
 	.gamma_get = armada_drm_crtc_gamma_get,
 	.fb_probe = armada_fb_probe,
@@ -149,7 +149,7 @@ int armada_fbdev_init(struct drm_device *dev)
 
 	priv->fbdev = fbh;
 
-	fbh->funcs = &armada_fb_helper_funcs;
+	drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(dev, fbh, 1, 1);
 	if (ret) {
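drm_fb_helper_prepare() is the new single entry point that initializes the helper and attaches the funcs table, replacing the open-coded 'helper->funcs = ...' assignments; the same conversion repeats in the ast, bochs and cirrus hunks below. The resulting bring-up order, sketched with a stand-in funcs table:

    #include <drm/drm_fb_helper.h>

    /* Sketch of the updated fbdev bring-up order; 'funcs' stands in
     * for a driver's drm_fb_helper_funcs table. */
    static int example_fbdev_init(struct drm_device *dev,
                                  struct drm_fb_helper *helper,
                                  const struct drm_fb_helper_funcs *funcs)
    {
        drm_fb_helper_prepare(dev, helper, funcs);

        return drm_fb_helper_init(dev, helper, 1, 1);
    }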
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
index d685a5421485..abbc309fe539 100644
--- a/drivers/gpu/drm/armada/armada_output.c
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -48,7 +48,7 @@ static void armada_drm_connector_destroy(struct drm_connector *conn)
 {
 	struct armada_connector *dconn = drm_to_armada_conn(conn);
 
-	drm_sysfs_connector_remove(conn);
+	drm_connector_unregister(conn);
 	drm_connector_cleanup(conn);
 	kfree(dconn);
 }
@@ -141,7 +141,7 @@ int armada_output_create(struct drm_device *dev,
 	if (ret)
 		goto err_conn;
 
-	ret = drm_sysfs_connector_add(&dconn->conn);
+	ret = drm_connector_register(&dconn->conn);
 	if (ret)
 		goto err_sysfs;
 
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5d6a87573c33..957d4fabf1e1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -362,7 +362,7 @@ static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
 	if (ret) {
 		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
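The final ttm_bo_reserve() argument is a struct ww_acquire_ctx pointer, so NULL is the honest spelling of "no ticket"; the literal 0 compiled only through implicit conversion. The call shape in isolation, assuming the 3.16-era parameter order (bo, interruptible, no_wait, use_ticket, ticket):

    #include <drm/ttm/ttm_bo_driver.h>

    /* Sketch: interruptible reserve with no ww_acquire_ctx ticket. */
    static int example_reserve(struct ttm_buffer_object *bo, bool no_wait)
    {
        return ttm_bo_reserve(bo, true, no_wait, false, NULL);
    }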
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index a28640f47c27..cba45c774552 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -287,7 +287,7 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 	*blue = ast_crtc->lut_b[regno] << 8;
 }
 
-static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
 	.gamma_set = ast_fb_gamma_set,
 	.gamma_get = ast_fb_gamma_get,
 	.fb_probe = astfb_create,
@@ -328,8 +328,10 @@ int ast_fbdev_init(struct drm_device *dev)
 		return -ENOMEM;
 
 	ast->fbdev = afbdev;
-	afbdev->helper.funcs = &ast_fb_helper_funcs;
 	spin_lock_init(&afbdev->dirty_lock);
+
+	drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
+
 	ret = drm_fb_helper_init(dev, &afbdev->helper,
 				 1, 1);
 	if (ret) {
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 114aee941d46..5389350244f2 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -667,17 +667,9 @@ static void ast_encoder_destroy(struct drm_encoder *encoder)
 static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
 	/* pick the encoder ids */
-	if (enc_id) {
-		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
@@ -829,7 +821,7 @@ static void ast_connector_destroy(struct drm_connector *connector)
 {
 	struct ast_connector *ast_connector = to_ast_connector(connector);
 	ast_i2c_destroy(ast_connector->i2c);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -871,7 +863,7 @@ static int ast_connector_init(struct drm_device *dev)
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
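drm_encoder_find() packages the lookup-and-cast boilerplate that ast, bochs and cirrus each used to open-code; assuming the inline helper in drm_crtc.h, it amounts to roughly this:

    #include <drm/drm_crtc.h>

    /* Approximation of the drm_encoder_find() helper (see drm_crtc.h). */
    static inline struct drm_encoder *
    example_encoder_find(struct drm_device *dev, uint32_t id)
    {
        struct drm_mode_object *mo;

        mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
        return mo ? obj_to_encoder(mo) : NULL;
    }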
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 9c13df29fd20..f5e0ead974a6 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -97,6 +97,7 @@ static struct drm_driver bochs_driver = {
 /* ---------------------------------------------------------------------- */
 /* pm interface */
 
+#ifdef CONFIG_PM_SLEEP
 static int bochs_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -131,6 +132,7 @@ static int bochs_pm_resume(struct device *dev)
 	drm_kms_helper_poll_enable(drm_dev);
 	return 0;
 }
+#endif
 
 static const struct dev_pm_ops bochs_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend,
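When CONFIG_PM_SLEEP is disabled, SET_SYSTEM_SLEEP_PM_OPS() expands to nothing, which left bochs_pm_suspend()/bochs_pm_resume() defined but unreferenced and the build warning about it; the #ifdef guard compiles the callbacks out together with their only users. The pattern in miniature:

    #include <linux/device.h>
    #include <linux/pm.h>

    #ifdef CONFIG_PM_SLEEP
    static int example_suspend(struct device *dev) { return 0; }
    static int example_resume(struct device *dev) { return 0; }
    #endif

    static const struct dev_pm_ops example_pm_ops = {
        /* Expands to empty initializers when CONFIG_PM_SLEEP=n, which
         * is why the callbacks above must be compiled out as well. */
        SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
    };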
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 561b84474122..fe95d31cd110 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -72,7 +72,7 @@ static int bochsfb_create(struct drm_fb_helper *helper,
 
 	bo = gem_to_bochs_bo(gobj);
 
-	ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+	ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -179,7 +179,7 @@ void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 	*blue = regno;
 }
 
-static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
 	.gamma_set = bochs_fb_gamma_set,
 	.gamma_get = bochs_fb_gamma_get,
 	.fb_probe = bochsfb_create,
@@ -189,7 +189,8 @@ int bochs_fbdev_init(struct bochs_device *bochs)
 {
 	int ret;
 
-	bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+	drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
+			      &bochs_fb_helper_funcs);
 
 	ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
 				 1, 1);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index dcf2e55f4ae9..9d7346b92653 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -53,7 +53,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 	if (old_fb) {
 		bochs_fb = to_bochs_framebuffer(old_fb);
 		bo = gem_to_bochs_bo(bochs_fb->obj);
-		ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+		ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
 		if (ret) {
 			DRM_ERROR("failed to reserve old_fb bo\n");
 		} else {
@@ -67,7 +67,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 
 	bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
 	bo = gem_to_bochs_bo(bochs_fb->obj);
-	ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+	ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -216,18 +216,9 @@ static struct drm_encoder *
 bochs_connector_best_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
 	/* pick the encoder ids */
-	if (enc_id) {
-		obj = drm_mode_object_find(connector->dev, enc_id,
-					   DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index b9a695d92792..1728a1b0b813 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -387,7 +387,7 @@ int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
 
 	*obj = NULL;
 
-	size = ALIGN(size, PAGE_SIZE);
+	size = PAGE_ALIGN(size);
 	if (size == 0)
 		return -EINVAL;
 
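PAGE_ALIGN() is defined in terms of ALIGN(), so this hunk is purely cosmetic; the shorter spelling states what the value is being aligned to:

    /* From include/linux/mm.h: */
    #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

    /* e.g. with 4 KiB pages: PAGE_ALIGN(1) == 4096, PAGE_ALIGN(4096) == 4096 */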
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 98fd17ae4916..d466696ed5e8 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -328,7 +328,7 @@ int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
 	}
 	drm_connector_helper_add(&ptn_bridge->connector,
 				 &ptn3460_connector_helper_funcs);
-	drm_sysfs_connector_add(&ptn_bridge->connector);
+	drm_connector_register(&ptn_bridge->connector);
 	drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);
 
 	return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 08ce520f61a5..4516b052cc67 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -76,6 +76,7 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cirrus_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -110,6 +111,7 @@ static int cirrus_pm_resume(struct device *dev)
 	drm_kms_helper_poll_enable(drm_dev);
 	return 0;
 }
+#endif
 
 static const struct file_operations cirrus_driver_fops = {
 	.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 117d3eca5e37..401c890b6c6a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -241,7 +241,7 @@ static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
 	if (ret) {
 		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 32bbba0a787b..2a135f253e29 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -288,7 +288,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
 	return 0;
 }
 
-static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
 	.gamma_set = cirrus_crtc_fb_gamma_set,
 	.gamma_get = cirrus_crtc_fb_gamma_get,
 	.fb_probe = cirrusfb_create,
@@ -306,9 +306,11 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
 		return -ENOMEM;
 
 	cdev->mode_info.gfbdev = gfbdev;
-	gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
 	spin_lock_init(&gfbdev->dirty_lock);
 
+	drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
+			      &cirrus_fb_helper_funcs);
+
 	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
 				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
 	if (ret) {
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 49332c5fe35b..e1c5c3222129 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -509,19 +509,9 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
 							  *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
 	/* pick the encoder ids */
-	if (enc_id) {
-		obj =
-		    drm_mode_object_find(connector->dev, enc_id,
-					 DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 0406110f83ed..86a4a4a60afc 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -80,11 +80,7 @@ int drm_buffer_alloc(struct drm_buffer **buf, int size)
 
 error_out:
 
-	/* Only last element can be null pointer so check for it first. */
-	if ((*buf)->data[idx])
-		kfree((*buf)->data[idx]);
-
-	for (--idx; idx >= 0; --idx)
+	for (; idx >= 0; --idx)
 		kfree((*buf)->data[idx]);
 
 	kfree(*buf);
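The old cleanup special-cased the element whose allocation may just have failed; since that slot is either valid or NULL and kfree(NULL) is defined to be a no-op, the single countdown loop frees exactly the same set of pointers:

    #include <linux/slab.h>

    /* Sketch: data[idx] is either a live allocation or NULL (the one
     * that just failed), and kfree(NULL) does nothing, so no pre-check
     * is needed. */
    static void example_cleanup(void *data[], int idx)
    {
        for (; idx >= 0; --idx)
            kfree(data[idx]);
    }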
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 68175b54504b..61acb8f6756d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1217,7 +1217,6 @@ int drm_infobufs(struct drm_device *dev, void *data,
 			struct drm_buf_desc __user *to =
 				&request->list[count];
 			struct drm_buf_entry *from = &dma->bufs[i];
-			struct drm_freelist *list = &dma->bufs[i].freelist;
 			if (copy_to_user(&to->count,
 					 &from->buf_count,
 					 sizeof(from->buf_count)) ||
@@ -1225,19 +1224,19 @@ int drm_infobufs(struct drm_device *dev, void *data,
 					 &from->buf_size,
 					 sizeof(from->buf_size)) ||
 			    copy_to_user(&to->low_mark,
-					 &list->low_mark,
-					 sizeof(list->low_mark)) ||
+					 &from->low_mark,
+					 sizeof(from->low_mark)) ||
 			    copy_to_user(&to->high_mark,
-					 &list->high_mark,
-					 sizeof(list->high_mark)))
+					 &from->high_mark,
+					 sizeof(from->high_mark)))
 				return -EFAULT;
 
 			DRM_DEBUG("%d %d %d %d %d\n",
 				  i,
 				  dma->bufs[i].buf_count,
 				  dma->bufs[i].buf_size,
-				  dma->bufs[i].freelist.low_mark,
-				  dma->bufs[i].freelist.high_mark);
+				  dma->bufs[i].low_mark,
+				  dma->bufs[i].high_mark);
 			++count;
 		}
 	}
@@ -1290,8 +1289,8 @@ int drm_markbufs(struct drm_device *dev, void *data,
 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
 		return -EINVAL;
 
-	entry->freelist.low_mark = request->low_mark;
-	entry->freelist.high_mark = request->high_mark;
+	entry->low_mark = request->low_mark;
+	entry->high_mark = request->high_mark;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index a4b017b6849e..9b23525c0ed0 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -1,18 +1,13 @@
1/**
2 * \file drm_context.c
3 * IOCTLs for generic contexts
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/* 1/*
10 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com 2 * Legacy: Generic DRM Contexts
11 * 3 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. 4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved. 6 * All Rights Reserved.
15 * 7 *
8 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
9 * Author: Gareth Hughes <gareth@valinux.com>
10 *
16 * Permission is hereby granted, free of charge, to any person obtaining a 11 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"), 12 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation 13 * to deal in the Software without restriction, including without limitation
@@ -33,14 +28,14 @@
33 * OTHER DEALINGS IN THE SOFTWARE. 28 * OTHER DEALINGS IN THE SOFTWARE.
34 */ 29 */
35 30
36/*
37 * ChangeLog:
38 * 2001-11-16 Torsten Duwe <duwe@caldera.de>
39 * added context constructor/destructor hooks,
40 * needed by SiS driver's memory management.
41 */
42
43#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include "drm_legacy.h"
33
34struct drm_ctx_list {
35 struct list_head head;
36 drm_context_t handle;
37 struct drm_file *tag;
38};
44 39
45/******************************************************************/ 40/******************************************************************/
46/** \name Context bitmap support */ 41/** \name Context bitmap support */
@@ -56,7 +51,7 @@
56 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex 51 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
57 * lock. 52 * lock.
58 */ 53 */
59void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) 54void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
60{ 55{
61 mutex_lock(&dev->struct_mutex); 56 mutex_lock(&dev->struct_mutex);
62 idr_remove(&dev->ctx_idr, ctx_handle); 57 idr_remove(&dev->ctx_idr, ctx_handle);
@@ -72,7 +67,7 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
72 * Allocate a new idr from drm_device::ctx_idr while holding the 67 * Allocate a new idr from drm_device::ctx_idr while holding the
73 * drm_device::struct_mutex lock. 68 * drm_device::struct_mutex lock.
74 */ 69 */
75static int drm_ctxbitmap_next(struct drm_device * dev) 70static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
76{ 71{
77 int ret; 72 int ret;
78 73
@@ -90,7 +85,7 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
90 * 85 *
91 * Initialise the drm_device::ctx_idr 86 * Initialise the drm_device::ctx_idr
92 */ 87 */
93int drm_ctxbitmap_init(struct drm_device * dev) 88int drm_legacy_ctxbitmap_init(struct drm_device * dev)
94{ 89{
95 idr_init(&dev->ctx_idr); 90 idr_init(&dev->ctx_idr);
96 return 0; 91 return 0;
@@ -104,13 +99,43 @@ int drm_ctxbitmap_init(struct drm_device * dev)
104 * Free all idr members using the drm_ctx_sarea_free helper function 99 * Free all idr members using the drm_ctx_sarea_free helper function
105 * while holding the drm_device::struct_mutex lock. 100 * while holding the drm_device::struct_mutex lock.
106 */ 101 */
107void drm_ctxbitmap_cleanup(struct drm_device * dev) 102void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
108{ 103{
109 mutex_lock(&dev->struct_mutex); 104 mutex_lock(&dev->struct_mutex);
110 idr_destroy(&dev->ctx_idr); 105 idr_destroy(&dev->ctx_idr);
111 mutex_unlock(&dev->struct_mutex); 106 mutex_unlock(&dev->struct_mutex);
112} 107}
113 108
109/**
110 * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
111 * @dev: DRM device to operate on
112 * @file: Open file to flush contexts for
113 *
114 * This iterates over all contexts on @dev and drops them if they're owned by
115 * @file. Note that after this call returns, new contexts might be added if
116 * the file is still alive.
117 */
118void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
119{
120 struct drm_ctx_list *pos, *tmp;
121
122 mutex_lock(&dev->ctxlist_mutex);
123
124 list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
125 if (pos->tag == file &&
126 pos->handle != DRM_KERNEL_CONTEXT) {
127 if (dev->driver->context_dtor)
128 dev->driver->context_dtor(dev, pos->handle);
129
130 drm_legacy_ctxbitmap_free(dev, pos->handle);
131 list_del(&pos->head);
132 kfree(pos);
133 }
134 }
135
136 mutex_unlock(&dev->ctxlist_mutex);
137}
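
[ Ed. note: the new flush helper mirrors the per-context teardown in
drm_legacy_rmctx() below, but reaps every context a client still owns. A
plausible call site -- purely illustrative, since the actual caller is not
part of this hunk -- is the file-release path: ]

	/* hypothetical caller: reap a client's leftover contexts when its
	 * file handle is closed, so legacy drivers don't leak handles */
	static void example_preclose(struct drm_device *dev,
				     struct drm_file *file_priv)
	{
		drm_legacy_ctxbitmap_flush(dev, file_priv);
	}
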
138
114/*@}*/ 139/*@}*/
115 140
116/******************************************************************/ 141/******************************************************************/
@@ -129,8 +154,8 @@ void drm_ctxbitmap_cleanup(struct drm_device * dev)
129 * Gets the map from drm_device::ctx_idr with the handle specified and 154 * Gets the map from drm_device::ctx_idr with the handle specified and
130 * returns its handle. 155 * returns its handle.
131 */ 156 */
132int drm_getsareactx(struct drm_device *dev, void *data, 157int drm_legacy_getsareactx(struct drm_device *dev, void *data,
133 struct drm_file *file_priv) 158 struct drm_file *file_priv)
134{ 159{
135 struct drm_ctx_priv_map *request = data; 160 struct drm_ctx_priv_map *request = data;
136 struct drm_local_map *map; 161 struct drm_local_map *map;
@@ -173,8 +198,8 @@ int drm_getsareactx(struct drm_device *dev, void *data,
173 * Searches the mapping specified in \p arg and updates the entry in 198 * Searches the mapping specified in \p arg and updates the entry in
174 * drm_device::ctx_idr with it. 199 * drm_device::ctx_idr with it.
175 */ 200 */
176int drm_setsareactx(struct drm_device *dev, void *data, 201int drm_legacy_setsareactx(struct drm_device *dev, void *data,
177 struct drm_file *file_priv) 202 struct drm_file *file_priv)
178{ 203{
179 struct drm_ctx_priv_map *request = data; 204 struct drm_ctx_priv_map *request = data;
180 struct drm_local_map *map = NULL; 205 struct drm_local_map *map = NULL;
@@ -273,8 +298,8 @@ static int drm_context_switch_complete(struct drm_device *dev,
273 * \param arg user argument pointing to a drm_ctx_res structure. 298 * \param arg user argument pointing to a drm_ctx_res structure.
274 * \return zero on success or a negative number on failure. 299 * \return zero on success or a negative number on failure.
275 */ 300 */
276int drm_resctx(struct drm_device *dev, void *data, 301int drm_legacy_resctx(struct drm_device *dev, void *data,
277 struct drm_file *file_priv) 302 struct drm_file *file_priv)
278{ 303{
279 struct drm_ctx_res *res = data; 304 struct drm_ctx_res *res = data;
280 struct drm_ctx ctx; 305 struct drm_ctx ctx;
@@ -304,16 +329,16 @@ int drm_resctx(struct drm_device *dev, void *data,
304 * 329 *
305 * Get a new handle for the context and copy to userspace. 330 * Get a new handle for the context and copy to userspace.
306 */ 331 */
307int drm_addctx(struct drm_device *dev, void *data, 332int drm_legacy_addctx(struct drm_device *dev, void *data,
308 struct drm_file *file_priv) 333 struct drm_file *file_priv)
309{ 334{
310 struct drm_ctx_list *ctx_entry; 335 struct drm_ctx_list *ctx_entry;
311 struct drm_ctx *ctx = data; 336 struct drm_ctx *ctx = data;
312 337
313 ctx->handle = drm_ctxbitmap_next(dev); 338 ctx->handle = drm_legacy_ctxbitmap_next(dev);
314 if (ctx->handle == DRM_KERNEL_CONTEXT) { 339 if (ctx->handle == DRM_KERNEL_CONTEXT) {
315 /* Skip kernel's context and get a new one. */ 340 /* Skip kernel's context and get a new one. */
316 ctx->handle = drm_ctxbitmap_next(dev); 341 ctx->handle = drm_legacy_ctxbitmap_next(dev);
317 } 342 }
318 DRM_DEBUG("%d\n", ctx->handle); 343 DRM_DEBUG("%d\n", ctx->handle);
319 if (ctx->handle == -1) { 344 if (ctx->handle == -1) {
@@ -348,7 +373,8 @@ int drm_addctx(struct drm_device *dev, void *data,
348 * \param arg user argument pointing to a drm_ctx structure. 373 * \param arg user argument pointing to a drm_ctx structure.
349 * \return zero on success or a negative number on failure. 374 * \return zero on success or a negative number on failure.
350 */ 375 */
351int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) 376int drm_legacy_getctx(struct drm_device *dev, void *data,
377 struct drm_file *file_priv)
352{ 378{
353 struct drm_ctx *ctx = data; 379 struct drm_ctx *ctx = data;
354 380
@@ -369,8 +395,8 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
369 * 395 *
370 * Calls context_switch(). 396 * Calls context_switch().
371 */ 397 */
372int drm_switchctx(struct drm_device *dev, void *data, 398int drm_legacy_switchctx(struct drm_device *dev, void *data,
373 struct drm_file *file_priv) 399 struct drm_file *file_priv)
374{ 400{
375 struct drm_ctx *ctx = data; 401 struct drm_ctx *ctx = data;
376 402
@@ -389,8 +415,8 @@ int drm_switchctx(struct drm_device *dev, void *data,
389 * 415 *
390 * Calls context_switch_complete(). 416 * Calls context_switch_complete().
391 */ 417 */
392int drm_newctx(struct drm_device *dev, void *data, 418int drm_legacy_newctx(struct drm_device *dev, void *data,
393 struct drm_file *file_priv) 419 struct drm_file *file_priv)
394{ 420{
395 struct drm_ctx *ctx = data; 421 struct drm_ctx *ctx = data;
396 422
@@ -411,8 +437,8 @@ int drm_newctx(struct drm_device *dev, void *data,
411 * 437 *
412 * If not the special kernel context, calls ctxbitmap_free() to free the specified context. 438 * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
413 */ 439 */
414int drm_rmctx(struct drm_device *dev, void *data, 440int drm_legacy_rmctx(struct drm_device *dev, void *data,
415 struct drm_file *file_priv) 441 struct drm_file *file_priv)
416{ 442{
417 struct drm_ctx *ctx = data; 443 struct drm_ctx *ctx = data;
418 444
@@ -420,7 +446,7 @@ int drm_rmctx(struct drm_device *dev, void *data,
420 if (ctx->handle != DRM_KERNEL_CONTEXT) { 446 if (ctx->handle != DRM_KERNEL_CONTEXT) {
421 if (dev->driver->context_dtor) 447 if (dev->driver->context_dtor)
422 dev->driver->context_dtor(dev, ctx->handle); 448 dev->driver->context_dtor(dev, ctx->handle);
423 drm_ctxbitmap_free(dev, ctx->handle); 449 drm_legacy_ctxbitmap_free(dev, ctx->handle);
424 } 450 }
425 451
426 mutex_lock(&dev->ctxlist_mutex); 452 mutex_lock(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fe94cc10cd35..fa2be249999c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -41,6 +41,10 @@
41 41
42#include "drm_crtc_internal.h" 42#include "drm_crtc_internal.h"
43 43
44static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
45 struct drm_mode_fb_cmd2 *r,
46 struct drm_file *file_priv);
47
44/** 48/**
45 * drm_modeset_lock_all - take all modeset locks 49 * drm_modeset_lock_all - take all modeset locks
46 * @dev: drm device 50 * @dev: drm device
@@ -178,6 +182,12 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
178 { DRM_MODE_SCALE_ASPECT, "Full aspect" }, 182 { DRM_MODE_SCALE_ASPECT, "Full aspect" },
179}; 183};
180 184
185static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
186 { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
187 { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
188 { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
189};
190
181/* 191/*
182 * Non-global properties, but "required" for certain connectors. 192 * Non-global properties, but "required" for certain connectors.
183 */ 193 */
@@ -357,6 +367,32 @@ const char *drm_get_format_name(uint32_t format)
357} 367}
358EXPORT_SYMBOL(drm_get_format_name); 368EXPORT_SYMBOL(drm_get_format_name);
359 369
370/*
371 * Internal function to assign a slot in the object idr and optionally
372 * register the object into the idr.
373 */
374static int drm_mode_object_get_reg(struct drm_device *dev,
375 struct drm_mode_object *obj,
376 uint32_t obj_type,
377 bool register_obj)
378{
379 int ret;
380
381 mutex_lock(&dev->mode_config.idr_mutex);
382 ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
383 if (ret >= 0) {
384 /*
385 * Set up the object linking under the protection of the idr
386 * lock so that other users can't see inconsistent state.
387 */
388 obj->id = ret;
389 obj->type = obj_type;
390 }
391 mutex_unlock(&dev->mode_config.idr_mutex);
392
393 return ret < 0 ? ret : 0;
394}
395
360/** 396/**
361 * drm_mode_object_get - allocate a new modeset identifier 397 * drm_mode_object_get - allocate a new modeset identifier
362 * @dev: DRM device 398 * @dev: DRM device
@@ -375,21 +411,15 @@ EXPORT_SYMBOL(drm_get_format_name);
375int drm_mode_object_get(struct drm_device *dev, 411int drm_mode_object_get(struct drm_device *dev,
376 struct drm_mode_object *obj, uint32_t obj_type) 412 struct drm_mode_object *obj, uint32_t obj_type)
377{ 413{
378 int ret; 414 return drm_mode_object_get_reg(dev, obj, obj_type, true);
415}
379 416
417static void drm_mode_object_register(struct drm_device *dev,
418 struct drm_mode_object *obj)
419{
380 mutex_lock(&dev->mode_config.idr_mutex); 420 mutex_lock(&dev->mode_config.idr_mutex);
381 ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL); 421 idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
382 if (ret >= 0) {
383 /*
384 * Set up the object linking under the protection of the idr
385 * lock so that other users can't see inconsistent state.
386 */
387 obj->id = ret;
388 obj->type = obj_type;
389 }
390 mutex_unlock(&dev->mode_config.idr_mutex); 422 mutex_unlock(&dev->mode_config.idr_mutex);
391
392 return ret < 0 ? ret : 0;
393} 423}
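
[ Ed. note: the refactor above separates ID allocation from publication:
drm_mode_object_get_reg() can reserve an ID while leaving the idr slot
NULL, and drm_mode_object_register() later publishes the object with
idr_replace(). The same reserve-then-publish idiom, reduced to a bare idr
(struct mode_obj is a stand-in type): ]

	#include <linux/idr.h>

	struct mode_obj { int id; };

	/* reserve an ID without exposing the object to lookups yet */
	static int reserve_id(struct idr *idr, struct mode_obj *obj)
	{
		int id = idr_alloc(idr, NULL, 1, 0, GFP_KERNEL);
		if (id < 0)
			return id;
		obj->id = id;	/* idr_find() still returns NULL for it */
		return 0;
	}

	/* publish: from here on idr_find(idr, obj->id) returns obj */
	static void publish_obj(struct idr *idr, struct mode_obj *obj)
	{
		idr_replace(idr, obj, obj->id);
	}
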
394 424
395/** 425/**
@@ -416,8 +446,12 @@ static struct drm_mode_object *_object_find(struct drm_device *dev,
416 446
417 mutex_lock(&dev->mode_config.idr_mutex); 447 mutex_lock(&dev->mode_config.idr_mutex);
418 obj = idr_find(&dev->mode_config.crtc_idr, id); 448 obj = idr_find(&dev->mode_config.crtc_idr, id);
419 if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) || 449 if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
420 (obj->id != id)) 450 obj = NULL;
451 if (obj && obj->id != id)
452 obj = NULL;
453 /* don't leak out unref'd fb's */
454 if (obj && (obj->type == DRM_MODE_OBJECT_FB))
421 obj = NULL; 455 obj = NULL;
422 mutex_unlock(&dev->mode_config.idr_mutex); 456 mutex_unlock(&dev->mode_config.idr_mutex);
423 457
@@ -444,9 +478,6 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
444 * function.*/ 478 * function.*/
445 WARN_ON(type == DRM_MODE_OBJECT_FB); 479 WARN_ON(type == DRM_MODE_OBJECT_FB);
446 obj = _object_find(dev, id, type); 480 obj = _object_find(dev, id, type);
447 /* don't leak out unref'd fb's */
448 if (obj && (obj->type == DRM_MODE_OBJECT_FB))
449 obj = NULL;
450 return obj; 481 return obj;
451} 482}
452EXPORT_SYMBOL(drm_mode_object_find); 483EXPORT_SYMBOL(drm_mode_object_find);
@@ -723,7 +754,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
723 */ 754 */
724int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, 755int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
725 struct drm_plane *primary, 756 struct drm_plane *primary,
726 void *cursor, 757 struct drm_plane *cursor,
727 const struct drm_crtc_funcs *funcs) 758 const struct drm_crtc_funcs *funcs)
728{ 759{
729 struct drm_mode_config *config = &dev->mode_config; 760 struct drm_mode_config *config = &dev->mode_config;
@@ -748,8 +779,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
748 config->num_crtc++; 779 config->num_crtc++;
749 780
750 crtc->primary = primary; 781 crtc->primary = primary;
782 crtc->cursor = cursor;
751 if (primary) 783 if (primary)
752 primary->possible_crtcs = 1 << drm_crtc_index(crtc); 784 primary->possible_crtcs = 1 << drm_crtc_index(crtc);
785 if (cursor)
786 cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
753 787
754 out: 788 out:
755 drm_modeset_unlock_all(dev); 789 drm_modeset_unlock_all(dev);
@@ -842,7 +876,7 @@ int drm_connector_init(struct drm_device *dev,
842 876
843 drm_modeset_lock_all(dev); 877 drm_modeset_lock_all(dev);
844 878
845 ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); 879 ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
846 if (ret) 880 if (ret)
847 goto out_unlock; 881 goto out_unlock;
848 882
@@ -881,6 +915,8 @@ int drm_connector_init(struct drm_device *dev,
881 drm_object_attach_property(&connector->base, 915 drm_object_attach_property(&connector->base,
882 dev->mode_config.dpms_property, 0); 916 dev->mode_config.dpms_property, 0);
883 917
918 connector->debugfs_entry = NULL;
919
884out_put: 920out_put:
885 if (ret) 921 if (ret)
886 drm_mode_object_put(dev, &connector->base); 922 drm_mode_object_put(dev, &connector->base);
@@ -921,6 +957,49 @@ void drm_connector_cleanup(struct drm_connector *connector)
921EXPORT_SYMBOL(drm_connector_cleanup); 957EXPORT_SYMBOL(drm_connector_cleanup);
922 958
923/** 959/**
960 * drm_connector_register - register a connector
961 * @connector: the connector to register
962 *
963 * Register userspace interfaces for a connector
964 *
965 * Returns:
966 * Zero on success, error code on failure.
967 */
968int drm_connector_register(struct drm_connector *connector)
969{
970 int ret;
971
972 drm_mode_object_register(connector->dev, &connector->base);
973
974 ret = drm_sysfs_connector_add(connector);
975 if (ret)
976 return ret;
977
978 ret = drm_debugfs_connector_add(connector);
979 if (ret) {
980 drm_sysfs_connector_remove(connector);
981 return ret;
982 }
983
984 return 0;
985}
986EXPORT_SYMBOL(drm_connector_register);
987
988/**
989 * drm_connector_unregister - unregister a connector
990 * @connector: the connector to unregister
991 *
992 * Unregister userspace interfaces for a connector
993 */
994void drm_connector_unregister(struct drm_connector *connector)
995{
996 drm_sysfs_connector_remove(connector);
997 drm_debugfs_connector_remove(connector);
998}
999EXPORT_SYMBOL(drm_connector_unregister);
1000
1001
1002/**
924 * drm_connector_unplug_all - unregister connector userspace interfaces 1003 * drm_connector_unplug_all - unregister connector userspace interfaces
925 * @dev: drm device 1004 * @dev: drm device
926 * 1005 *
@@ -934,7 +1013,7 @@ void drm_connector_unplug_all(struct drm_device *dev)
934 1013
935 /* taking the mode config mutex ends up in a clash with sysfs */ 1014 /* taking the mode config mutex ends up in a clash with sysfs */
936 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 1015 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
937 drm_sysfs_connector_remove(connector); 1016 drm_connector_unregister(connector);
938 1017
939} 1018}
940EXPORT_SYMBOL(drm_connector_unplug_all); 1019EXPORT_SYMBOL(drm_connector_unplug_all);
@@ -1214,6 +1293,7 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
1214{ 1293{
1215 struct drm_property *edid; 1294 struct drm_property *edid;
1216 struct drm_property *dpms; 1295 struct drm_property *dpms;
1296 struct drm_property *dev_path;
1217 1297
1218 /* 1298 /*
1219 * Standard properties (apply to all connectors) 1299 * Standard properties (apply to all connectors)
@@ -1228,6 +1308,12 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
1228 ARRAY_SIZE(drm_dpms_enum_list)); 1308 ARRAY_SIZE(drm_dpms_enum_list));
1229 dev->mode_config.dpms_property = dpms; 1309 dev->mode_config.dpms_property = dpms;
1230 1310
1311 dev_path = drm_property_create(dev,
1312 DRM_MODE_PROP_BLOB |
1313 DRM_MODE_PROP_IMMUTABLE,
1314 "PATH", 0);
1315 dev->mode_config.path_property = dev_path;
1316
1231 return 0; 1317 return 0;
1232} 1318}
1233 1319
@@ -1384,6 +1470,33 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
1384EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); 1470EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
1385 1471
1386/** 1472/**
1473 * drm_mode_create_aspect_ratio_property - create aspect ratio property
1474 * @dev: DRM device
1475 *
1476 * Called by a driver the first time it's needed; must be attached to desired
1477 * connectors.
1478 *
1479 * Returns:
1480 * Zero on success, errno on failure.
1481 */
1482int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
1483{
1484 if (dev->mode_config.aspect_ratio_property)
1485 return 0;
1486
1487 dev->mode_config.aspect_ratio_property =
1488 drm_property_create_enum(dev, 0, "aspect ratio",
1489 drm_aspect_ratio_enum_list,
1490 ARRAY_SIZE(drm_aspect_ratio_enum_list));
1491
1492 if (dev->mode_config.aspect_ratio_property == NULL)
1493 return -ENOMEM;
1494
1495 return 0;
1496}
1497EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
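
[ Ed. note: as with the other optional connector properties, the new
aspect-ratio property is created once per device and then attached per
connector; likely driver usage, with the initial value an assumption: ]

	if (drm_mode_create_aspect_ratio_property(dev))
		return -ENOMEM;

	/* default to "Automatic" until userspace picks 4:3 or 16:9 */
	drm_object_attach_property(&connector->base,
				   dev->mode_config.aspect_ratio_property,
				   DRM_MODE_PICTURE_ASPECT_NONE);
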
1498
1499/**
1387 * drm_mode_create_dirty_property - create dirty property 1500 * drm_mode_create_dirty_property - create dirty property
1388 * @dev: DRM device 1501 * @dev: DRM device
1389 * 1502 *
@@ -1470,6 +1583,15 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
1470} 1583}
1471EXPORT_SYMBOL(drm_mode_group_init_legacy_group); 1584EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
1472 1585
1586void drm_reinit_primary_mode_group(struct drm_device *dev)
1587{
1588 drm_modeset_lock_all(dev);
1589 drm_mode_group_destroy(&dev->primary->mode_group);
1590 drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
1591 drm_modeset_unlock_all(dev);
1592}
1593EXPORT_SYMBOL(drm_reinit_primary_mode_group);
1594
1473/** 1595/**
1474 * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo 1596 * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
1475 * @out: drm_mode_modeinfo struct to return to the user 1597 * @out: drm_mode_modeinfo struct to return to the user
@@ -2118,45 +2240,32 @@ out:
2118 return ret; 2240 return ret;
2119} 2241}
2120 2242
2121/** 2243/*
2122 * drm_mode_setplane - configure a plane's configuration 2244 * setplane_internal - setplane handler for internal callers
2123 * @dev: DRM device
2124 * @data: ioctl data*
2125 * @file_priv: DRM file info
2126 * 2245 *
2127 * Set plane configuration, including placement, fb, scaling, and other factors. 2246 * Note that we assume an extra reference has already been taken on fb. If the
2128 * Or pass a NULL fb to disable. 2247 * update fails, this reference will be dropped before return; if it succeeds,
2248 * the previous framebuffer (if any) will be unreferenced instead.
2129 * 2249 *
2130 * Returns: 2250 * src_{x,y,w,h} are provided in 16.16 fixed point format
2131 * Zero on success, errno on failure.
2132 */ 2251 */
2133int drm_mode_setplane(struct drm_device *dev, void *data, 2252static int setplane_internal(struct drm_plane *plane,
2134 struct drm_file *file_priv) 2253 struct drm_crtc *crtc,
2254 struct drm_framebuffer *fb,
2255 int32_t crtc_x, int32_t crtc_y,
2256 uint32_t crtc_w, uint32_t crtc_h,
2257 /* src_{x,y,w,h} values are 16.16 fixed point */
2258 uint32_t src_x, uint32_t src_y,
2259 uint32_t src_w, uint32_t src_h)
2135{ 2260{
2136 struct drm_mode_set_plane *plane_req = data; 2261 struct drm_device *dev = plane->dev;
2137 struct drm_plane *plane; 2262 struct drm_framebuffer *old_fb = NULL;
2138 struct drm_crtc *crtc;
2139 struct drm_framebuffer *fb = NULL, *old_fb = NULL;
2140 int ret = 0; 2263 int ret = 0;
2141 unsigned int fb_width, fb_height; 2264 unsigned int fb_width, fb_height;
2142 int i; 2265 int i;
2143 2266
2144 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2145 return -EINVAL;
2146
2147 /*
2148 * First, find the plane, crtc, and fb objects. If not available,
2149 * we don't bother to call the driver.
2150 */
2151 plane = drm_plane_find(dev, plane_req->plane_id);
2152 if (!plane) {
2153 DRM_DEBUG_KMS("Unknown plane ID %d\n",
2154 plane_req->plane_id);
2155 return -ENOENT;
2156 }
2157
2158 /* No fb means shut it down */ 2267 /* No fb means shut it down */
2159 if (!plane_req->fb_id) { 2268 if (!fb) {
2160 drm_modeset_lock_all(dev); 2269 drm_modeset_lock_all(dev);
2161 old_fb = plane->fb; 2270 old_fb = plane->fb;
2162 ret = plane->funcs->disable_plane(plane); 2271 ret = plane->funcs->disable_plane(plane);
@@ -2170,14 +2279,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
2170 goto out; 2279 goto out;
2171 } 2280 }
2172 2281
2173 crtc = drm_crtc_find(dev, plane_req->crtc_id);
2174 if (!crtc) {
2175 DRM_DEBUG_KMS("Unknown crtc ID %d\n",
2176 plane_req->crtc_id);
2177 ret = -ENOENT;
2178 goto out;
2179 }
2180
2181 /* Check whether this plane is usable on this CRTC */ 2282 /* Check whether this plane is usable on this CRTC */
2182 if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { 2283 if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
2183 DRM_DEBUG_KMS("Invalid crtc for plane\n"); 2284 DRM_DEBUG_KMS("Invalid crtc for plane\n");
@@ -2185,14 +2286,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
2185 goto out; 2286 goto out;
2186 } 2287 }
2187 2288
2188 fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
2189 if (!fb) {
2190 DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
2191 plane_req->fb_id);
2192 ret = -ENOENT;
2193 goto out;
2194 }
2195
2196 /* Check whether this plane supports the fb pixel format. */ 2289 /* Check whether this plane supports the fb pixel format. */
2197 for (i = 0; i < plane->format_count; i++) 2290 for (i = 0; i < plane->format_count; i++)
2198 if (fb->pixel_format == plane->format_types[i]) 2291 if (fb->pixel_format == plane->format_types[i])
@@ -2208,43 +2301,25 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
2208 fb_height = fb->height << 16; 2301 fb_height = fb->height << 16;
2209 2302
2210 /* Make sure source coordinates are inside the fb. */ 2303 /* Make sure source coordinates are inside the fb. */
2211 if (plane_req->src_w > fb_width || 2304 if (src_w > fb_width ||
2212 plane_req->src_x > fb_width - plane_req->src_w || 2305 src_x > fb_width - src_w ||
2213 plane_req->src_h > fb_height || 2306 src_h > fb_height ||
2214 plane_req->src_y > fb_height - plane_req->src_h) { 2307 src_y > fb_height - src_h) {
2215 DRM_DEBUG_KMS("Invalid source coordinates " 2308 DRM_DEBUG_KMS("Invalid source coordinates "
2216 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", 2309 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
2217 plane_req->src_w >> 16, 2310 src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
2218 ((plane_req->src_w & 0xffff) * 15625) >> 10, 2311 src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
2219 plane_req->src_h >> 16, 2312 src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
2220 ((plane_req->src_h & 0xffff) * 15625) >> 10, 2313 src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
2221 plane_req->src_x >> 16,
2222 ((plane_req->src_x & 0xffff) * 15625) >> 10,
2223 plane_req->src_y >> 16,
2224 ((plane_req->src_y & 0xffff) * 15625) >> 10);
2225 ret = -ENOSPC; 2314 ret = -ENOSPC;
2226 goto out; 2315 goto out;
2227 } 2316 }
2228 2317
2229 /* Give drivers some help against integer overflows */
2230 if (plane_req->crtc_w > INT_MAX ||
2231 plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
2232 plane_req->crtc_h > INT_MAX ||
2233 plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
2234 DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
2235 plane_req->crtc_w, plane_req->crtc_h,
2236 plane_req->crtc_x, plane_req->crtc_y);
2237 ret = -ERANGE;
2238 goto out;
2239 }
2240
2241 drm_modeset_lock_all(dev); 2318 drm_modeset_lock_all(dev);
2242 old_fb = plane->fb; 2319 old_fb = plane->fb;
2243 ret = plane->funcs->update_plane(plane, crtc, fb, 2320 ret = plane->funcs->update_plane(plane, crtc, fb,
2244 plane_req->crtc_x, plane_req->crtc_y, 2321 crtc_x, crtc_y, crtc_w, crtc_h,
2245 plane_req->crtc_w, plane_req->crtc_h, 2322 src_x, src_y, src_w, src_h);
2246 plane_req->src_x, plane_req->src_y,
2247 plane_req->src_w, plane_req->src_h);
2248 if (!ret) { 2323 if (!ret) {
2249 plane->crtc = crtc; 2324 plane->crtc = crtc;
2250 plane->fb = fb; 2325 plane->fb = fb;
@@ -2261,6 +2336,85 @@ out:
2261 drm_framebuffer_unreference(old_fb); 2336 drm_framebuffer_unreference(old_fb);
2262 2337
2263 return ret; 2338 return ret;
2339
2340}
2341
2342/**
2343 * drm_mode_setplane - configure a plane
2344 * @dev: DRM device
2345 * @data: ioctl data
2346 * @file_priv: DRM file info
2347 *
2348 * Set plane configuration, including placement, fb, scaling, and other factors.
2349 * Or pass a NULL fb to disable (planes may be disabled without providing a
2350 * valid crtc).
2351 *
2352 * Returns:
2353 * Zero on success, errno on failure.
2354 */
2355int drm_mode_setplane(struct drm_device *dev, void *data,
2356 struct drm_file *file_priv)
2357{
2358 struct drm_mode_set_plane *plane_req = data;
2359 struct drm_mode_object *obj;
2360 struct drm_plane *plane;
2361 struct drm_crtc *crtc = NULL;
2362 struct drm_framebuffer *fb = NULL;
2363
2364 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2365 return -EINVAL;
2366
2367 /* Give drivers some help against integer overflows */
2368 if (plane_req->crtc_w > INT_MAX ||
2369 plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
2370 plane_req->crtc_h > INT_MAX ||
2371 plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
2372 DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
2373 plane_req->crtc_w, plane_req->crtc_h,
2374 plane_req->crtc_x, plane_req->crtc_y);
2375 return -ERANGE;
2376 }
2377
2378 /*
2379 * First, find the plane, crtc, and fb objects. If not available,
2380 * we don't bother to call the driver.
2381 */
2382 obj = drm_mode_object_find(dev, plane_req->plane_id,
2383 DRM_MODE_OBJECT_PLANE);
2384 if (!obj) {
2385 DRM_DEBUG_KMS("Unknown plane ID %d\n",
2386 plane_req->plane_id);
2387 return -ENOENT;
2388 }
2389 plane = obj_to_plane(obj);
2390
2391 if (plane_req->fb_id) {
2392 fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
2393 if (!fb) {
2394 DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
2395 plane_req->fb_id);
2396 return -ENOENT;
2397 }
2398
2399 obj = drm_mode_object_find(dev, plane_req->crtc_id,
2400 DRM_MODE_OBJECT_CRTC);
2401 if (!obj) {
2402 DRM_DEBUG_KMS("Unknown crtc ID %d\n",
2403 plane_req->crtc_id);
2404 return -ENOENT;
2405 }
2406 crtc = obj_to_crtc(obj);
2407 }
2408
2409 /*
2410 * setplane_internal will take care of deref'ing either the old or new
2411 * framebuffer depending on success.
2412 */
2413 return setplane_internal(plane, crtc, fb,
2414 plane_req->crtc_x, plane_req->crtc_y,
2415 plane_req->crtc_w, plane_req->crtc_h,
2416 plane_req->src_x, plane_req->src_y,
2417 plane_req->src_w, plane_req->src_h);
2264} 2418}
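
[ Ed. note: the comment on setplane_internal() defines a reference-handoff
contract worth spelling out for callers inside drm_crtc.c: ]

	/* caller side of the contract:
	 *   - take one extra reference on fb before the call
	 *   - on failure, setplane_internal() has already dropped it
	 *   - on success, it now backs plane->fb and the previous
	 *     plane->fb reference was dropped instead
	 */
	drm_framebuffer_reference(fb);
	ret = setplane_internal(plane, crtc, fb,
				crtc_x, crtc_y, crtc_w, crtc_h,
				src_x, src_y, src_w, src_h);
	/* no drm_framebuffer_unreference(fb) here, in either case */
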
2265 2419
2266/** 2420/**
@@ -2509,6 +2663,102 @@ out:
2509 return ret; 2663 return ret;
2510} 2664}
2511 2665
2666/**
2667 * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
2668 * universal plane handler call
2669 * @crtc: crtc to update cursor for
2670 * @req: data pointer for the ioctl
2671 * @file_priv: drm file for the ioctl call
2672 *
2673 * Legacy cursor ioctls work directly with driver buffer handles. To
2674 * translate legacy ioctl calls into universal plane handler calls, we need to
2675 * wrap the native buffer handle in a drm_framebuffer.
2676 *
2677 * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
2678 * buffer with a pitch of 4*width; the universal plane interface should be used
2679 * directly in cases where the hardware can support other buffer settings and
2680 * userspace wants to make use of these capabilities.
2681 *
2682 * Returns:
2683 * Zero on success, errno on failure.
2684 */
2685static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2686 struct drm_mode_cursor2 *req,
2687 struct drm_file *file_priv)
2688{
2689 struct drm_device *dev = crtc->dev;
2690 struct drm_framebuffer *fb = NULL;
2691 struct drm_mode_fb_cmd2 fbreq = {
2692 .width = req->width,
2693 .height = req->height,
2694 .pixel_format = DRM_FORMAT_ARGB8888,
2695 .pitches = { req->width * 4 },
2696 .handles = { req->handle },
2697 };
2698 int32_t crtc_x, crtc_y;
2699 uint32_t crtc_w = 0, crtc_h = 0;
2700 uint32_t src_w = 0, src_h = 0;
2701 int ret = 0;
2702
2703 BUG_ON(!crtc->cursor);
2704
2705 /*
2706 * Obtain fb we'll be using (either new or existing) and take an extra
2707 * reference to it if fb != NULL. setplane_internal will take care of dropping
2708 * the reference if the plane update fails.
2709 */
2710 if (req->flags & DRM_MODE_CURSOR_BO) {
2711 if (req->handle) {
2712 fb = add_framebuffer_internal(dev, &fbreq, file_priv);
2713 if (IS_ERR(fb)) {
2714 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
2715 return PTR_ERR(fb);
2716 }
2717
2718 drm_framebuffer_reference(fb);
2719 } else {
2720 fb = NULL;
2721 }
2722 } else {
2723 mutex_lock(&dev->mode_config.mutex);
2724 fb = crtc->cursor->fb;
2725 if (fb)
2726 drm_framebuffer_reference(fb);
2727 mutex_unlock(&dev->mode_config.mutex);
2728 }
2729
2730 if (req->flags & DRM_MODE_CURSOR_MOVE) {
2731 crtc_x = req->x;
2732 crtc_y = req->y;
2733 } else {
2734 crtc_x = crtc->cursor_x;
2735 crtc_y = crtc->cursor_y;
2736 }
2737
2738 if (fb) {
2739 crtc_w = fb->width;
2740 crtc_h = fb->height;
2741 src_w = fb->width << 16;
2742 src_h = fb->height << 16;
2743 }
2744
2745 /*
2746 * setplane_internal will take care of deref'ing either the old or new
2747 * framebuffer depending on success.
2748 */
2749 ret = setplane_internal(crtc->cursor, crtc, fb,
2750 crtc_x, crtc_y, crtc_w, crtc_h,
2751 0, 0, src_w, src_h);
2752
2753 /* Update successful; save new cursor position, if necessary */
2754 if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
2755 crtc->cursor_x = req->x;
2756 crtc->cursor_y = req->y;
2757 }
2758
2759 return ret;
2760}
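
[ Ed. note: the src_* parameters are 16.16 fixed point, so whole pixels
are shifted left by 16; a worked example for a 64x64 cursor: ]

	crtc_w = 64;		/* destination size, integer pixels */
	crtc_h = 64;
	src_w  = 64 << 16;	/* 0x00400000, i.e. 64.0 in 16.16 */
	src_h  = 64 << 16;
	/* the DRM_DEBUG_KMS output earlier prints the fractional part
	 * as (x & 0xffff) * 15625 >> 10, which equals
	 * (x & 0xffff) * 1000000 / 65536: millionths of a pixel */
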
2761
2512static int drm_mode_cursor_common(struct drm_device *dev, 2762static int drm_mode_cursor_common(struct drm_device *dev,
2513 struct drm_mode_cursor2 *req, 2763 struct drm_mode_cursor2 *req,
2514 struct drm_file *file_priv) 2764 struct drm_file *file_priv)
@@ -2528,6 +2778,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
2528 return -ENOENT; 2778 return -ENOENT;
2529 } 2779 }
2530 2780
2781 /*
2782 * If this crtc has a universal cursor plane, call that plane's update
2783 * handler rather than using legacy cursor handlers.
2784 */
2785 if (crtc->cursor)
2786 return drm_mode_cursor_universal(crtc, req, file_priv);
2787
2531 drm_modeset_lock(&crtc->mutex, NULL); 2788 drm_modeset_lock(&crtc->mutex, NULL);
2532 if (req->flags & DRM_MODE_CURSOR_BO) { 2789 if (req->flags & DRM_MODE_CURSOR_BO) {
2533 if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { 2790 if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
@@ -2827,56 +3084,38 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
2827 return 0; 3084 return 0;
2828} 3085}
2829 3086
2830/** 3087static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
2831 * drm_mode_addfb2 - add an FB to the graphics configuration 3088 struct drm_mode_fb_cmd2 *r,
2832 * @dev: drm device for the ioctl 3089 struct drm_file *file_priv)
2833 * @data: data pointer for the ioctl
2834 * @file_priv: drm file for the ioctl call
2835 *
2836 * Add a new FB to the specified CRTC, given a user request with format. This is
2837 * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
2838 * and uses fourcc codes as pixel format specifiers.
2839 *
2840 * Called by the user via ioctl.
2841 *
2842 * Returns:
2843 * Zero on success, errno on failure.
2844 */
2845int drm_mode_addfb2(struct drm_device *dev,
2846 void *data, struct drm_file *file_priv)
2847{ 3090{
2848 struct drm_mode_fb_cmd2 *r = data;
2849 struct drm_mode_config *config = &dev->mode_config; 3091 struct drm_mode_config *config = &dev->mode_config;
2850 struct drm_framebuffer *fb; 3092 struct drm_framebuffer *fb;
2851 int ret; 3093 int ret;
2852 3094
2853 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2854 return -EINVAL;
2855
2856 if (r->flags & ~DRM_MODE_FB_INTERLACED) { 3095 if (r->flags & ~DRM_MODE_FB_INTERLACED) {
2857 DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); 3096 DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
2858 return -EINVAL; 3097 return ERR_PTR(-EINVAL);
2859 } 3098 }
2860 3099
2861 if ((config->min_width > r->width) || (r->width > config->max_width)) { 3100 if ((config->min_width > r->width) || (r->width > config->max_width)) {
2862 DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", 3101 DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
2863 r->width, config->min_width, config->max_width); 3102 r->width, config->min_width, config->max_width);
2864 return -EINVAL; 3103 return ERR_PTR(-EINVAL);
2865 } 3104 }
2866 if ((config->min_height > r->height) || (r->height > config->max_height)) { 3105 if ((config->min_height > r->height) || (r->height > config->max_height)) {
2867 DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", 3106 DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
2868 r->height, config->min_height, config->max_height); 3107 r->height, config->min_height, config->max_height);
2869 return -EINVAL; 3108 return ERR_PTR(-EINVAL);
2870 } 3109 }
2871 3110
2872 ret = framebuffer_check(r); 3111 ret = framebuffer_check(r);
2873 if (ret) 3112 if (ret)
2874 return ret; 3113 return ERR_PTR(ret);
2875 3114
2876 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); 3115 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
2877 if (IS_ERR(fb)) { 3116 if (IS_ERR(fb)) {
2878 DRM_DEBUG_KMS("could not create framebuffer\n"); 3117 DRM_DEBUG_KMS("could not create framebuffer\n");
2879 return PTR_ERR(fb); 3118 return fb;
2880 } 3119 }
2881 3120
2882 mutex_lock(&file_priv->fbs_lock); 3121 mutex_lock(&file_priv->fbs_lock);
@@ -2885,8 +3124,37 @@ int drm_mode_addfb2(struct drm_device *dev,
2885 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); 3124 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
2886 mutex_unlock(&file_priv->fbs_lock); 3125 mutex_unlock(&file_priv->fbs_lock);
2887 3126
3127 return fb;
3128}
2888 3129
2889 return ret; 3130/**
3131 * drm_mode_addfb2 - add an FB to the graphics configuration
3132 * @dev: drm device for the ioctl
3133 * @data: data pointer for the ioctl
3134 * @file_priv: drm file for the ioctl call
3135 *
3136 * Add a new FB to the specified CRTC, given a user request with format. This is
3137 * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
3138 * and uses fourcc codes as pixel format specifiers.
3139 *
3140 * Called by the user via ioctl.
3141 *
3142 * Returns:
3143 * Zero on success, errno on failure.
3144 */
3145int drm_mode_addfb2(struct drm_device *dev,
3146 void *data, struct drm_file *file_priv)
3147{
3148 struct drm_framebuffer *fb;
3149
3150 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3151 return -EINVAL;
3152
3153 fb = add_framebuffer_internal(dev, data, file_priv);
3154 if (IS_ERR(fb))
3155 return PTR_ERR(fb);
3156
3157 return 0;
2890} 3158}
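
[ Ed. note: the refactor converts the shared helper from returning an int
errno to returning a framebuffer pointer with the error encoded in it --
the standard ERR_PTR idiom, recapped: ]

	#include <linux/err.h>

	struct drm_framebuffer *fb;

	fb = add_framebuffer_internal(dev, r, file_priv);
	if (IS_ERR(fb))			/* pointer encodes -EINVAL etc. */
		return PTR_ERR(fb);	/* decode back into an errno */
	/* fb is a valid framebuffer here; use it or return 0 */
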
2891 3159
2892/** 3160/**
@@ -3176,7 +3444,7 @@ fail:
3176EXPORT_SYMBOL(drm_property_create); 3444EXPORT_SYMBOL(drm_property_create);
3177 3445
3178/** 3446/**
3179 * drm_property_create - create a new enumeration property type 3447 * drm_property_create_enum - create a new enumeration property type
3180 * @dev: drm device 3448 * @dev: drm device
3181 * @flags: flags specifying the property type 3449 * @flags: flags specifying the property type
3182 * @name: name of the property 3450 * @name: name of the property
@@ -3222,7 +3490,7 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
3222EXPORT_SYMBOL(drm_property_create_enum); 3490EXPORT_SYMBOL(drm_property_create_enum);
3223 3491
3224/** 3492/**
3225 * drm_property_create - create a new bitmask property type 3493 * drm_property_create_bitmask - create a new bitmask property type
3226 * @dev: drm device 3494 * @dev: drm device
3227 * @flags: flags specifying the property type 3495 * @flags: flags specifying the property type
3228 * @name: name of the property 3496 * @name: name of the property
@@ -3242,19 +3510,28 @@ EXPORT_SYMBOL(drm_property_create_enum);
3242struct drm_property *drm_property_create_bitmask(struct drm_device *dev, 3510struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
3243 int flags, const char *name, 3511 int flags, const char *name,
3244 const struct drm_prop_enum_list *props, 3512 const struct drm_prop_enum_list *props,
3245 int num_values) 3513 int num_props,
3514 uint64_t supported_bits)
3246{ 3515{
3247 struct drm_property *property; 3516 struct drm_property *property;
3248 int i, ret; 3517 int i, ret, index = 0;
3518 int num_values = hweight64(supported_bits);
3249 3519
3250 flags |= DRM_MODE_PROP_BITMASK; 3520 flags |= DRM_MODE_PROP_BITMASK;
3251 3521
3252 property = drm_property_create(dev, flags, name, num_values); 3522 property = drm_property_create(dev, flags, name, num_values);
3253 if (!property) 3523 if (!property)
3254 return NULL; 3524 return NULL;
3525 for (i = 0; i < num_props; i++) {
3526 if (!(supported_bits & (1ULL << props[i].type)))
3527 continue;
3255 3528
3256 for (i = 0; i < num_values; i++) { 3529 if (WARN_ON(index >= num_values)) {
3257 ret = drm_property_add_enum(property, i, 3530 drm_property_destroy(dev, property);
3531 return NULL;
3532 }
3533
3534 ret = drm_property_add_enum(property, index++,
3258 props[i].type, 3535 props[i].type,
3259 props[i].name); 3536 props[i].name);
3260 if (ret) { 3537 if (ret) {
@@ -3284,7 +3561,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
3284} 3561}
3285 3562
3286/** 3563/**
3287 * drm_property_create - create a new ranged property type 3564 * drm_property_create_range - create a new ranged property type
3288 * @dev: drm device 3565 * @dev: drm device
3289 * @flags: flags specifying the property type 3566 * @flags: flags specifying the property type
3290 * @name: name of the property 3567 * @name: name of the property
@@ -3703,6 +3980,25 @@ done:
3703 return ret; 3980 return ret;
3704} 3981}
3705 3982
3983int drm_mode_connector_set_path_property(struct drm_connector *connector,
3984 char *path)
3985{
3986 struct drm_device *dev = connector->dev;
3987 int ret, size;
3988 size = strlen(path) + 1;
3989
3990 connector->path_blob_ptr = drm_property_create_blob(connector->dev,
3991 size, path);
3992 if (!connector->path_blob_ptr)
3993 return -EINVAL;
3994
3995 ret = drm_object_property_set_value(&connector->base,
3996 dev->mode_config.path_property,
3997 connector->path_blob_ptr->base.id);
3998 return ret;
3999}
4000EXPORT_SYMBOL(drm_mode_connector_set_path_property);
4001
3706/** 4002/**
3707 * drm_mode_connector_update_edid_property - update the edid property of a connector 4003 * drm_mode_connector_update_edid_property - update the edid property of a connector
3708 * @connector: drm connector 4004 * @connector: drm connector
@@ -3720,6 +4016,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
3720 struct drm_device *dev = connector->dev; 4016 struct drm_device *dev = connector->dev;
3721 int ret, size; 4017 int ret, size;
3722 4018
4019 /* ignore requests to set edid when overridden */
4020 if (connector->override_edid)
4021 return 0;
4022
3723 if (connector->edid_blob_ptr) 4023 if (connector->edid_blob_ptr)
3724 drm_property_destroy_blob(dev, connector->edid_blob_ptr); 4024 drm_property_destroy_blob(dev, connector->edid_blob_ptr);
3725 4025
@@ -4680,6 +4980,36 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
4680EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); 4980EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
4681 4981
4682/** 4982/**
4983 * drm_rotation_simplify() - Try to simplify the rotation
4984 * @rotation: Rotation to be simplified
4985 * @supported_rotations: Supported rotations
4986 *
4987 * Attempt to simplify the rotation to a form that is supported.
4988 * E.g. if the hardware supports everything except DRM_REFLECT_X
4989 * one could call this function like this:
4990 *
4991 * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
4992 * BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
4993 * BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
4994 *
4995 * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
4996 * transforms the hardware supports, this function may not
4997 * be able to produce a supported transform, so the caller should
4998 * check the result afterwards.
4999 */
5000unsigned int drm_rotation_simplify(unsigned int rotation,
5001 unsigned int supported_rotations)
5002{
5003 if (rotation & ~supported_rotations) {
5004 rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
5005 rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
5006 }
5007
5008 return rotation;
5009}
5010EXPORT_SYMBOL(drm_rotation_simplify);
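
[ Ed. note: a worked example of the simplification above. Assuming the
rotation bit indices of this era (DRM_ROTATE_0..DRM_ROTATE_270 are bits
0..3, DRM_REFLECT_X/Y bits 4/5), rotate-90 plus reflect-x on hardware
without reflect-x becomes: ]

	unsigned int r = BIT(DRM_ROTATE_90) | BIT(DRM_REFLECT_X);

	r = drm_rotation_simplify(r, BIT(DRM_ROTATE_0) |
				     BIT(DRM_ROTATE_90) |
				     BIT(DRM_ROTATE_180) |
				     BIT(DRM_ROTATE_270) |
				     BIT(DRM_REFLECT_Y));

	/* r == BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y): both reflect
	 * bits were toggled and the rotation advanced by 180 degrees
	 * (90 -> 270). That is the same transform, because reflect-x
	 * composed with reflect-y is itself a 180 degree rotation. */
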
5011
5012/**
4683 * drm_mode_config_init - initialize DRM mode_configuration structure 5013 * drm_mode_config_init - initialize DRM mode_configuration structure
4684 * @dev: DRM device 5014 * @dev: DRM device
4685 * 5015 *
@@ -4797,3 +5127,21 @@ void drm_mode_config_cleanup(struct drm_device *dev)
4797 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 5127 drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
4798} 5128}
4799EXPORT_SYMBOL(drm_mode_config_cleanup); 5129EXPORT_SYMBOL(drm_mode_config_cleanup);
5130
5131struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
5132 unsigned int supported_rotations)
5133{
5134 static const struct drm_prop_enum_list props[] = {
5135 { DRM_ROTATE_0, "rotate-0" },
5136 { DRM_ROTATE_90, "rotate-90" },
5137 { DRM_ROTATE_180, "rotate-180" },
5138 { DRM_ROTATE_270, "rotate-270" },
5139 { DRM_REFLECT_X, "reflect-x" },
5140 { DRM_REFLECT_Y, "reflect-y" },
5141 };
5142
5143 return drm_property_create_bitmask(dev, 0, "rotation",
5144 props, ARRAY_SIZE(props),
5145 supported_rotations);
5146}
5147EXPORT_SYMBOL(drm_mode_create_rotation_property);
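
[ Ed. note: the two rotation helpers are meant to work together -- the
property advertises what the hardware can do, and drm_rotation_simplify()
folds incoming requests into that set. A hedged sketch of the driver
side, for hardware limited to 0/180: ]

	/* at init: create and attach the property */
	prop = drm_mode_create_rotation_property(dev,
			BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_180));
	if (!prop)
		return -ENOMEM;
	drm_object_attach_property(&plane->base, prop, BIT(DRM_ROTATE_0));

	/* in the plane update path: simplify, then reject leftovers */
	rotation = drm_rotation_simplify(rotation,
			BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_180));
	if (rotation & ~(BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_180)))
		return -EINVAL;
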
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 78b37f3febd3..6c65a0a28fbd 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -818,6 +818,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
818 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, 818 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
819 &fb->bits_per_pixel); 819 &fb->bits_per_pixel);
820 fb->pixel_format = mode_cmd->pixel_format; 820 fb->pixel_format = mode_cmd->pixel_format;
821 fb->flags = mode_cmd->flags;
821} 822}
822EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); 823EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
823 824
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index b4b51d46f339..13bd42923dd4 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -35,6 +35,7 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/export.h> 36#include <linux/export.h>
37#include <drm/drmP.h> 37#include <drm/drmP.h>
38#include <drm/drm_edid.h>
38 39
39#if defined(CONFIG_DEBUG_FS) 40#if defined(CONFIG_DEBUG_FS)
40 41
@@ -237,5 +238,186 @@ int drm_debugfs_cleanup(struct drm_minor *minor)
237 return 0; 238 return 0;
238} 239}
239 240
241static int connector_show(struct seq_file *m, void *data)
242{
243 struct drm_connector *connector = m->private;
244 const char *status;
245
246 switch (connector->force) {
247 case DRM_FORCE_ON:
248 status = "on\n";
249 break;
250
251 case DRM_FORCE_ON_DIGITAL:
252 status = "digital\n";
253 break;
254
255 case DRM_FORCE_OFF:
256 status = "off\n";
257 break;
258
259 case DRM_FORCE_UNSPECIFIED:
260 status = "unspecified\n";
261 break;
262
263 default:
264 return 0;
265 }
266
267 seq_puts(m, status);
268
269 return 0;
270}
271
272static int connector_open(struct inode *inode, struct file *file)
273{
274 struct drm_connector *connector = inode->i_private;
275
276 return single_open(file, connector_show, connector);
277}
278
279static ssize_t connector_write(struct file *file, const char __user *ubuf,
280 size_t len, loff_t *offp)
281{
282 struct seq_file *m = file->private_data;
283 struct drm_connector *connector = m->private;
284 char buf[12];
285
286 if (len > sizeof(buf) - 1)
287 return -EINVAL;
288
289 if (copy_from_user(buf, ubuf, len))
290 return -EFAULT;
291
292 buf[len] = '\0';
293
294 if (!strcmp(buf, "on"))
295 connector->force = DRM_FORCE_ON;
296 else if (!strcmp(buf, "digital"))
297 connector->force = DRM_FORCE_ON_DIGITAL;
298 else if (!strcmp(buf, "off"))
299 connector->force = DRM_FORCE_OFF;
300 else if (!strcmp(buf, "unspecified"))
301 connector->force = DRM_FORCE_UNSPECIFIED;
302 else
303 return -EINVAL;
304
305 return len;
306}
307
308static int edid_show(struct seq_file *m, void *data)
309{
310 struct drm_connector *connector = m->private;
311 struct drm_property_blob *edid = connector->edid_blob_ptr;
312
313 if (connector->override_edid && edid)
314 seq_write(m, edid->data, edid->length);
315
316 return 0;
317}
318
319static int edid_open(struct inode *inode, struct file *file)
320{
321 struct drm_connector *connector = inode->i_private;
322
323 return single_open(file, edid_show, connector);
324}
325
326static ssize_t edid_write(struct file *file, const char __user *ubuf,
327 size_t len, loff_t *offp)
328{
329 struct seq_file *m = file->private_data;
330 struct drm_connector *connector = m->private;
331 char *buf;
332 struct edid *edid;
333 int ret;
334
335 buf = memdup_user(ubuf, len);
336 if (IS_ERR(buf))
337 return PTR_ERR(buf);
338
339 edid = (struct edid *) buf;
340
341 if (len == 5 && !strncmp(buf, "reset", 5)) {
342 connector->override_edid = false;
343 ret = drm_mode_connector_update_edid_property(connector, NULL);
344 } else if (len < EDID_LENGTH ||
345 EDID_LENGTH * (1 + edid->extensions) > len) {
346 ret = -EINVAL;
347 } else {
348 connector->override_edid = false;
349 ret = drm_mode_connector_update_edid_property(connector, edid);
350 if (!ret)
351 connector->override_edid = true;
352 }
353
354 kfree(buf);
355
356 return (ret) ? ret : len;
357}
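
[ Ed. note: for reference, these files land in the connector's debugfs
directory, typically /sys/kernel/debug/dri/<minor>/<connector-name>/.
Writing "on", "digital", "off" or "unspecified" to the force file
overrides detection, and edid_override accepts either a raw EDID blob
(base block plus any extensions) or the literal string "reset" to drop
the override again. ]
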
358
359static const struct file_operations drm_edid_fops = {
360 .owner = THIS_MODULE,
361 .open = edid_open,
362 .read = seq_read,
363 .llseek = seq_lseek,
364 .release = single_release,
365 .write = edid_write
366};
367
368
369static const struct file_operations drm_connector_fops = {
370 .owner = THIS_MODULE,
371 .open = connector_open,
372 .read = seq_read,
373 .llseek = seq_lseek,
374 .release = single_release,
375 .write = connector_write
376};
377
378int drm_debugfs_connector_add(struct drm_connector *connector)
379{
380 struct drm_minor *minor = connector->dev->primary;
381 struct dentry *root, *ent;
382
383 if (!minor->debugfs_root)
384 return -1;
385
386 root = debugfs_create_dir(connector->name, minor->debugfs_root);
387 if (!root)
388 return -ENOMEM;
389
390 connector->debugfs_entry = root;
391
392 /* force */
393 ent = debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
394 &drm_connector_fops);
395 if (!ent)
396 goto error;
397
398 /* edid */
399 ent = debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root,
400 connector, &drm_edid_fops);
401 if (!ent)
402 goto error;
403
404 return 0;
405
406error:
407 debugfs_remove_recursive(connector->debugfs_entry);
408 connector->debugfs_entry = NULL;
409 return -ENOMEM;
410}
411
412void drm_debugfs_connector_remove(struct drm_connector *connector)
413{
414 if (!connector->debugfs_entry)
415 return;
416
417 debugfs_remove_recursive(connector->debugfs_entry);
418
419 connector->debugfs_entry = NULL;
420}
421
240#endif /* CONFIG_DEBUG_FS */ 422#endif /* CONFIG_DEBUG_FS */
241 423
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
new file mode 100644
index 000000000000..ac3c2738db94
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -0,0 +1,2715 @@
1/*
2 * Copyright © 2014 Red Hat
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#include <linux/kernel.h>
24#include <linux/delay.h>
25#include <linux/init.h>
26#include <linux/errno.h>
27#include <linux/sched.h>
28#include <linux/seq_file.h>
29#include <linux/i2c.h>
30#include <drm/drm_dp_mst_helper.h>
31#include <drm/drmP.h>
32
33#include <drm/drm_fixed.h>
34
35/**
36 * DOC: dp mst helper
37 *
38 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
39 * protocol. The helpers contain a topology manager and bandwidth manager.
40 * The helpers encapsulate the sending and received of sideband msgs.
41 */
42static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
43 char *buf);
44static int test_calc_pbn_mode(void);
45
46static void drm_dp_put_port(struct drm_dp_mst_port *port);
47
48static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
49 int id,
50 struct drm_dp_payload *payload);
51
52static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
53 struct drm_dp_mst_port *port,
54 int offset, int size, u8 *bytes);
55
56static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
57 struct drm_dp_mst_branch *mstb);
58static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
59 struct drm_dp_mst_branch *mstb,
60 struct drm_dp_mst_port *port);
61static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
62 u8 *guid);
63
64static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
65static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
66static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
67/* sideband msg handling */
68static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
69{
70 u8 bitmask = 0x80;
71 u8 bitshift = 7;
72 u8 array_index = 0;
73 int number_of_bits = num_nibbles * 4;
74 u8 remainder = 0;
75
76 while (number_of_bits != 0) {
77 number_of_bits--;
78 remainder <<= 1;
79 remainder |= (data[array_index] & bitmask) >> bitshift;
80 bitmask >>= 1;
81 bitshift--;
82 if (bitmask == 0) {
83 bitmask = 0x80;
84 bitshift = 7;
85 array_index++;
86 }
87 if ((remainder & 0x10) == 0x10)
88 remainder ^= 0x13;
89 }
90
91 number_of_bits = 4;
92 while (number_of_bits != 0) {
93 number_of_bits--;
94 remainder <<= 1;
95 if ((remainder & 0x10) != 0)
96 remainder ^= 0x13;
97 }
98
99 return remainder;
100}
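
[ Ed. note: the magic constants are the sideband CRC polynomials with the
implicit leading term dropped: 0x13 = 0b10011 encodes x^4 + x + 1 for the
4-bit header CRC, and 0xd5 in the data variant below plays the same role
for the 8-bit CRC. The loop is plain bitwise long division -- shift in one
data bit, then XOR the polynomial out whenever the top bit overflows. ]
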
101
102static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
103{
104 u8 bitmask = 0x80;
105 u8 bitshift = 7;
106 u8 array_index = 0;
107 int number_of_bits = number_of_bytes * 8;
108 u16 remainder = 0;
109
110 while (number_of_bits != 0) {
111 number_of_bits--;
112 remainder <<= 1;
113 remainder |= (data[array_index] & bitmask) >> bitshift;
114 bitmask >>= 1;
115 bitshift--;
116 if (bitmask == 0) {
117 bitmask = 0x80;
118 bitshift = 7;
119 array_index++;
120 }
121 if ((remainder & 0x100) == 0x100)
122 remainder ^= 0xd5;
123 }
124
125 number_of_bits = 8;
126 while (number_of_bits != 0) {
127 number_of_bits--;
128 remainder <<= 1;
129 if ((remainder & 0x100) != 0)
130 remainder ^= 0xd5;
131 }
132
133 return remainder & 0xff;
134}
135static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
136{
137 u8 size = 3;
138 size += (hdr->lct / 2);
139 return size;
140}
141
142static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
143 u8 *buf, int *len)
144{
145 int idx = 0;
146 int i;
147 u8 crc4;
148 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
149 for (i = 0; i < (hdr->lct / 2); i++)
150 buf[idx++] = hdr->rad[i];
151 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
152 (hdr->msg_len & 0x3f);
153 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
154
155 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
156 buf[idx - 1] |= (crc4 & 0xf);
157
158 *len = idx;
159}
160
161static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
162 u8 *buf, int buflen, u8 *hdrlen)
163{
164 u8 crc4;
165 u8 len;
166 int i;
167 u8 idx;
168 if (buf[0] == 0)
169 return false;
170 len = 3;
171 len += ((buf[0] & 0xf0) >> 4) / 2;
172 if (len > buflen)
173 return false;
174 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
175
176 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
177 DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
178 return false;
179 }
180
181 hdr->lct = (buf[0] & 0xf0) >> 4;
182 hdr->lcr = (buf[0] & 0xf);
183 idx = 1;
184 for (i = 0; i < (hdr->lct / 2); i++)
185 hdr->rad[i] = buf[idx++];
186 hdr->broadcast = (buf[idx] >> 7) & 0x1;
187 hdr->path_msg = (buf[idx] >> 6) & 0x1;
188 hdr->msg_len = buf[idx] & 0x3f;
189 idx++;
190 hdr->somt = (buf[idx] >> 7) & 0x1;
191 hdr->eomt = (buf[idx] >> 6) & 0x1;
192 hdr->seqno = (buf[idx] >> 4) & 0x1;
193 idx++;
194 *hdrlen = idx;
195 return true;
196}
197
198static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
199 struct drm_dp_sideband_msg_tx *raw)
200{
201 int idx = 0;
202 int i;
203 u8 *buf = raw->msg;
204 buf[idx++] = req->req_type & 0x7f;
205
206 switch (req->req_type) {
207 case DP_ENUM_PATH_RESOURCES:
208 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
209 idx++;
210 break;
211 case DP_ALLOCATE_PAYLOAD:
212 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
213 (req->u.allocate_payload.number_sdp_streams & 0xf);
214 idx++;
215 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
216 idx++;
217 buf[idx] = (req->u.allocate_payload.pbn >> 8);
218 idx++;
219 buf[idx] = (req->u.allocate_payload.pbn & 0xff);
220 idx++;
221 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
222 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
223 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
224 idx++;
225 }
226 if (req->u.allocate_payload.number_sdp_streams & 1) {
227 i = req->u.allocate_payload.number_sdp_streams - 1;
228 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
229 idx++;
230 }
231 break;
232 case DP_QUERY_PAYLOAD:
233 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
234 idx++;
235 buf[idx] = (req->u.query_payload.vcpi & 0x7f);
236 idx++;
237 break;
238 case DP_REMOTE_DPCD_READ:
239 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
240 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
241 idx++;
242 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
243 idx++;
244 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
245 idx++;
246 buf[idx] = (req->u.dpcd_read.num_bytes);
247 idx++;
248 break;
249
250 case DP_REMOTE_DPCD_WRITE:
251 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
252 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
253 idx++;
254 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
255 idx++;
256 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
257 idx++;
258 buf[idx] = (req->u.dpcd_write.num_bytes);
259 idx++;
260 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
261 idx += req->u.dpcd_write.num_bytes;
262 break;
263 case DP_REMOTE_I2C_READ:
264 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
265 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
266 idx++;
267 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
268 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
269 idx++;
270 buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
271 idx++;
272 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
273 idx += req->u.i2c_read.transactions[i].num_bytes;
274
275 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
276 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
277 idx++;
278 }
279 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
280 idx++;
281 buf[idx] = (req->u.i2c_read.num_bytes_read);
282 idx++;
283 break;
284
285 case DP_REMOTE_I2C_WRITE:
286 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
287 idx++;
288 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
289 idx++;
290 buf[idx] = (req->u.i2c_write.num_bytes);
291 idx++;
292 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
293 idx += req->u.i2c_write.num_bytes;
294 break;
295 }
296 raw->cur_len = idx;
297}
298
299static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
300{
301 u8 crc4;
302 crc4 = drm_dp_msg_data_crc4(msg, len);
303 msg[len] = crc4;
304}
305
306static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
307 struct drm_dp_sideband_msg_tx *raw)
308{
309 int idx = 0;
310 u8 *buf = raw->msg;
311
312 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
313
314 raw->cur_len = idx;
315}
316
317/* this adds a chunk of msg to the builder to get the final msg */
318static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
319 u8 *replybuf, u8 replybuflen, bool hdr)
320{
321 int ret;
322 u8 crc4;
323
324 if (hdr) {
325 u8 hdrlen;
326 struct drm_dp_sideband_msg_hdr recv_hdr;
327 ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
328 if (ret == false) {
329 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
330 return false;
331 }
332
333 /* get length contained in this portion */
334 msg->curchunk_len = recv_hdr.msg_len;
335 msg->curchunk_hdrlen = hdrlen;
336
 337 /* we have already got an SOMT - don't bother parsing a duplicate */
338 if (recv_hdr.somt && msg->have_somt)
339 return false;
340
341 if (recv_hdr.somt) {
342 memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
343 msg->have_somt = true;
344 }
345 if (recv_hdr.eomt)
346 msg->have_eomt = true;
347
348 /* copy the bytes for the remainder of this header chunk */
349 msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
350 memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
351 } else {
352 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
353 msg->curchunk_idx += replybuflen;
354 }
355
356 if (msg->curchunk_idx >= msg->curchunk_len) {
 357 /* compute the data CRC; the result is not currently verified */
358 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
359 /* copy chunk into bigger msg */
360 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
361 msg->curlen += msg->curchunk_len - 1;
362 }
363 return true;
364}
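/*
 * Reassembly sketch: a reply that doesn't fit in one sideband transaction
 * arrives as several chunks; the first carries a header with SOMT set, the
 * last one EOMT. Each chunk contributes curchunk_len bytes, of which the
 * final byte is the data CRC and is dropped when the chunk is folded into
 * msg->msg[] above. The caller re-enters with hdr = true at each fresh
 * chunk header and keeps going until have_eomt is set.
 */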
365
366static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
367 struct drm_dp_sideband_msg_reply_body *repmsg)
368{
369 int idx = 1;
370 int i;
371 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
372 idx += 16;
373 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
374 idx++;
375 if (idx > raw->curlen)
376 goto fail_len;
377 for (i = 0; i < repmsg->u.link_addr.nports; i++) {
378 if (raw->msg[idx] & 0x80)
379 repmsg->u.link_addr.ports[i].input_port = 1;
380
381 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
382 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
383
384 idx++;
385 if (idx > raw->curlen)
386 goto fail_len;
387 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
388 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
389 if (repmsg->u.link_addr.ports[i].input_port == 0)
390 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
391 idx++;
392 if (idx > raw->curlen)
393 goto fail_len;
394 if (repmsg->u.link_addr.ports[i].input_port == 0) {
395 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
396 idx++;
397 if (idx > raw->curlen)
398 goto fail_len;
399 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
400 idx += 16;
401 if (idx > raw->curlen)
402 goto fail_len;
403 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
404 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
405 idx++;
406
407 }
408 if (idx > raw->curlen)
409 goto fail_len;
410 }
411
412 return true;
413fail_len:
414 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
415 return false;
416}
417
418static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
419 struct drm_dp_sideband_msg_reply_body *repmsg)
420{
421 int idx = 1;
422 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
423 idx++;
424 if (idx > raw->curlen)
425 goto fail_len;
 426 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
 427 idx++;
 428 if (idx > raw->curlen)
 429 goto fail_len;
 430 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
431 return true;
432fail_len:
433 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
434 return false;
435}
436
437static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
438 struct drm_dp_sideband_msg_reply_body *repmsg)
439{
440 int idx = 1;
441 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
442 idx++;
443 if (idx > raw->curlen)
444 goto fail_len;
445 return true;
446fail_len:
447 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
448 return false;
449}
450
451static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
452 struct drm_dp_sideband_msg_reply_body *repmsg)
453{
454 int idx = 1;
455
456 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
457 idx++;
458 if (idx > raw->curlen)
459 goto fail_len;
460 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
461 idx++;
462 /* TODO check */
463 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
464 return true;
465fail_len:
466 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
467 return false;
468}
469
470static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
471 struct drm_dp_sideband_msg_reply_body *repmsg)
472{
473 int idx = 1;
474 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
475 idx++;
476 if (idx > raw->curlen)
477 goto fail_len;
478 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
479 idx += 2;
480 if (idx > raw->curlen)
481 goto fail_len;
482 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
483 idx += 2;
484 if (idx > raw->curlen)
485 goto fail_len;
486 return true;
487fail_len:
488 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
489 return false;
490}
491
492static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
493 struct drm_dp_sideband_msg_reply_body *repmsg)
494{
495 int idx = 1;
496 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
497 idx++;
498 if (idx > raw->curlen)
499 goto fail_len;
500 repmsg->u.allocate_payload.vcpi = raw->msg[idx];
501 idx++;
502 if (idx > raw->curlen)
503 goto fail_len;
504 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
505 idx += 2;
506 if (idx > raw->curlen)
507 goto fail_len;
508 return true;
509fail_len:
510 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
511 return false;
512}
513
514static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
515 struct drm_dp_sideband_msg_reply_body *repmsg)
516{
517 int idx = 1;
518 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
519 idx++;
520 if (idx > raw->curlen)
521 goto fail_len;
522 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
523 idx += 2;
524 if (idx > raw->curlen)
525 goto fail_len;
526 return true;
527fail_len:
528 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
529 return false;
530}
531
532static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
533 struct drm_dp_sideband_msg_reply_body *msg)
534{
535 memset(msg, 0, sizeof(*msg));
536 msg->reply_type = (raw->msg[0] & 0x80) >> 7;
537 msg->req_type = (raw->msg[0] & 0x7f);
538
539 if (msg->reply_type) {
540 memcpy(msg->u.nak.guid, &raw->msg[1], 16);
541 msg->u.nak.reason = raw->msg[17];
542 msg->u.nak.nak_data = raw->msg[18];
543 return false;
544 }
545
546 switch (msg->req_type) {
547 case DP_LINK_ADDRESS:
548 return drm_dp_sideband_parse_link_address(raw, msg);
549 case DP_QUERY_PAYLOAD:
550 return drm_dp_sideband_parse_query_payload_ack(raw, msg);
551 case DP_REMOTE_DPCD_READ:
552 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
553 case DP_REMOTE_DPCD_WRITE:
554 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
555 case DP_REMOTE_I2C_READ:
556 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
557 case DP_ENUM_PATH_RESOURCES:
558 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
559 case DP_ALLOCATE_PAYLOAD:
560 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
561 default:
562 DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
563 return false;
564 }
565}
566
567static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
568 struct drm_dp_sideband_msg_req_body *msg)
569{
570 int idx = 1;
571
572 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
573 idx++;
574 if (idx > raw->curlen)
575 goto fail_len;
576
577 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
578 idx += 16;
579 if (idx > raw->curlen)
580 goto fail_len;
581
582 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
583 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
584 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
585 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
586 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
587 idx++;
588 return true;
589fail_len:
590 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
591 return false;
592}
593
594static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
595 struct drm_dp_sideband_msg_req_body *msg)
596{
597 int idx = 1;
598
599 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
600 idx++;
601 if (idx > raw->curlen)
602 goto fail_len;
603
604 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
605 idx += 16;
606 if (idx > raw->curlen)
607 goto fail_len;
608
609 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
610 idx++;
611 return true;
612fail_len:
613 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
614 return false;
615}
616
617static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
618 struct drm_dp_sideband_msg_req_body *msg)
619{
620 memset(msg, 0, sizeof(*msg));
621 msg->req_type = (raw->msg[0] & 0x7f);
622
623 switch (msg->req_type) {
624 case DP_CONNECTION_STATUS_NOTIFY:
625 return drm_dp_sideband_parse_connection_status_notify(raw, msg);
626 case DP_RESOURCE_STATUS_NOTIFY:
627 return drm_dp_sideband_parse_resource_status_notify(raw, msg);
628 default:
629 DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
630 return false;
631 }
632}
633
634static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
635{
636 struct drm_dp_sideband_msg_req_body req;
637
638 req.req_type = DP_REMOTE_DPCD_WRITE;
639 req.u.dpcd_write.port_number = port_num;
640 req.u.dpcd_write.dpcd_address = offset;
641 req.u.dpcd_write.num_bytes = num_bytes;
642 req.u.dpcd_write.bytes = bytes;
643 drm_dp_encode_sideband_req(&req, msg);
644
645 return 0;
646}
647
648static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
649{
650 struct drm_dp_sideband_msg_req_body req;
651
652 req.req_type = DP_LINK_ADDRESS;
653 drm_dp_encode_sideband_req(&req, msg);
654 return 0;
655}
656
657static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
658{
659 struct drm_dp_sideband_msg_req_body req;
660
661 req.req_type = DP_ENUM_PATH_RESOURCES;
662 req.u.port_num.port_number = port_num;
663 drm_dp_encode_sideband_req(&req, msg);
664 msg->path_msg = true;
665 return 0;
666}
667
668static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
669 u8 vcpi, uint16_t pbn)
670{
671 struct drm_dp_sideband_msg_req_body req;
672 memset(&req, 0, sizeof(req));
673 req.req_type = DP_ALLOCATE_PAYLOAD;
674 req.u.allocate_payload.port_number = port_num;
675 req.u.allocate_payload.vcpi = vcpi;
676 req.u.allocate_payload.pbn = pbn;
677 drm_dp_encode_sideband_req(&req, msg);
678 msg->path_msg = true;
679 return 0;
680}
681
682static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
683 struct drm_dp_vcpi *vcpi)
684{
685 int ret;
686
687 mutex_lock(&mgr->payload_lock);
688 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
689 if (ret > mgr->max_payloads) {
690 ret = -EINVAL;
691 DRM_DEBUG_KMS("out of payload ids %d\n", ret);
692 goto out_unlock;
693 }
694
695 set_bit(ret, &mgr->payload_mask);
696 vcpi->vcpi = ret;
697 mgr->proposed_vcpis[ret - 1] = vcpi;
698out_unlock:
699 mutex_unlock(&mgr->payload_lock);
700 return ret;
701}
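/*
 * Illustrative walk-through (this assumes bit 0 of payload_mask is
 * reserved at init time, as the teardown path in
 * drm_dp_mst_topology_mgr_set_mst() below re-establishes): with
 * max_payloads = 2, the first allocation finds bit 1 clear and hands out
 * VCPI 1, the second hands out VCPI 2, and a third attempt finds no clear
 * bit below 3, so find_first_zero_bit() returns 3 > max_payloads and the
 * call fails with -EINVAL.
 */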
702
703static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
704 int id)
705{
706 if (id == 0)
707 return;
708
709 mutex_lock(&mgr->payload_lock);
710 DRM_DEBUG_KMS("putting payload %d\n", id);
711 clear_bit(id, &mgr->payload_mask);
712 mgr->proposed_vcpis[id - 1] = NULL;
713 mutex_unlock(&mgr->payload_lock);
714}
715
716static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
717 struct drm_dp_sideband_msg_tx *txmsg)
718{
719 bool ret;
720 mutex_lock(&mgr->qlock);
721 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
722 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
723 mutex_unlock(&mgr->qlock);
724 return ret;
725}
726
727static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
728 struct drm_dp_sideband_msg_tx *txmsg)
729{
730 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
731 int ret;
732
733 ret = wait_event_timeout(mgr->tx_waitq,
734 check_txmsg_state(mgr, txmsg),
735 (4 * HZ));
736 mutex_lock(&mstb->mgr->qlock);
737 if (ret > 0) {
738 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
739 ret = -EIO;
740 goto out;
741 }
742 } else {
743 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
744
745 /* dump some state */
746 ret = -EIO;
747
748 /* remove from q */
749 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
750 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
751 list_del(&txmsg->next);
752 }
753
754 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
755 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
756 mstb->tx_slots[txmsg->seqno] = NULL;
757 }
758 }
759out:
760 mutex_unlock(&mgr->qlock);
761
762 return ret;
763}
764
765static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
766{
767 struct drm_dp_mst_branch *mstb;
768
769 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
770 if (!mstb)
771 return NULL;
772
773 mstb->lct = lct;
774 if (lct > 1)
775 memcpy(mstb->rad, rad, lct / 2);
776 INIT_LIST_HEAD(&mstb->ports);
777 kref_init(&mstb->kref);
778 return mstb;
779}
780
781static void drm_dp_destroy_mst_branch_device(struct kref *kref)
782{
783 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
784 struct drm_dp_mst_port *port, *tmp;
785 bool wake_tx = false;
786
787 cancel_work_sync(&mstb->mgr->work);
788
789 /*
790 * destroy all ports - don't need lock
791 * as there are no more references to the mst branch
792 * device at this point.
793 */
794 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
795 list_del(&port->next);
796 drm_dp_put_port(port);
797 }
798
799 /* drop any tx slots msg */
800 mutex_lock(&mstb->mgr->qlock);
801 if (mstb->tx_slots[0]) {
802 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
803 mstb->tx_slots[0] = NULL;
804 wake_tx = true;
805 }
806 if (mstb->tx_slots[1]) {
807 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
808 mstb->tx_slots[1] = NULL;
809 wake_tx = true;
810 }
811 mutex_unlock(&mstb->mgr->qlock);
812
813 if (wake_tx)
814 wake_up(&mstb->mgr->tx_waitq);
815 kfree(mstb);
816}
817
818static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
819{
820 kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
821}
822
823
824static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
825{
826 switch (old_pdt) {
827 case DP_PEER_DEVICE_DP_LEGACY_CONV:
828 case DP_PEER_DEVICE_SST_SINK:
829 /* remove i2c over sideband */
830 drm_dp_mst_unregister_i2c_bus(&port->aux);
831 break;
832 case DP_PEER_DEVICE_MST_BRANCHING:
833 drm_dp_put_mst_branch_device(port->mstb);
834 port->mstb = NULL;
835 break;
836 }
837}
838
839static void drm_dp_destroy_port(struct kref *kref)
840{
841 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
842 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
843 if (!port->input) {
844 port->vcpi.num_slots = 0;
845 if (port->connector)
846 (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
847 drm_dp_port_teardown_pdt(port, port->pdt);
848
 849 if (port->vcpi.vcpi > 0)
850 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
851 }
852 kfree(port);
853
854 (*mgr->cbs->hotplug)(mgr);
855}
856
857static void drm_dp_put_port(struct drm_dp_mst_port *port)
858{
859 kref_put(&port->kref, drm_dp_destroy_port);
860}
861
862static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
863{
864 struct drm_dp_mst_port *port;
865 struct drm_dp_mst_branch *rmstb;
866 if (to_find == mstb) {
867 kref_get(&mstb->kref);
868 return mstb;
869 }
870 list_for_each_entry(port, &mstb->ports, next) {
871 if (port->mstb) {
872 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
873 if (rmstb)
874 return rmstb;
875 }
876 }
877 return NULL;
878}
879
880static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
881{
882 struct drm_dp_mst_branch *rmstb = NULL;
883 mutex_lock(&mgr->lock);
884 if (mgr->mst_primary)
885 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
886 mutex_unlock(&mgr->lock);
887 return rmstb;
888}
889
890static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
891{
892 struct drm_dp_mst_port *port, *mport;
893
894 list_for_each_entry(port, &mstb->ports, next) {
895 if (port == to_find) {
896 kref_get(&port->kref);
897 return port;
898 }
899 if (port->mstb) {
900 mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
901 if (mport)
902 return mport;
903 }
904 }
905 return NULL;
906}
907
908static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
909{
910 struct drm_dp_mst_port *rport = NULL;
911 mutex_lock(&mgr->lock);
912 if (mgr->mst_primary)
913 rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
914 mutex_unlock(&mgr->lock);
915 return rport;
916}
917
918static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
919{
920 struct drm_dp_mst_port *port;
921
922 list_for_each_entry(port, &mstb->ports, next) {
923 if (port->port_num == port_num) {
924 kref_get(&port->kref);
925 return port;
926 }
927 }
928
929 return NULL;
930}
931
932/*
933 * calculate a new RAD for this MST branch device
934 * if parent has an LCT of 2 then it has 1 nibble of RAD,
935 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
936 */
937static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
938 u8 *rad)
939{
940 int lct = port->parent->lct;
941 int shift = 4;
 942 int idx = (lct - 1) / 2;
 943 if (lct > 1) {
 944 memcpy(rad, port->parent->rad, idx + 1);
945 shift = (lct % 2) ? 4 : 0;
946 } else
947 rad[0] = 0;
948
949 rad[idx] |= port->port_num << shift;
950 return lct + 1;
951}
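/*
 * Worked example of the nibble packing (matching how
 * drm_dp_get_mst_branch_device() below unpacks it): a port numbered 1
 * whose parent branch has lct 2 and rad[0] = 0x20 (one path nibble, port
 * 2) yields lct 3 with rad[0] = 0x21; the new hop lands in the low nibble
 * of byte 0.
 */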
952
953/*
954 * return sends link address for new mstb
955 */
956static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
957{
958 int ret;
959 u8 rad[6], lct;
960 bool send_link = false;
961 switch (port->pdt) {
962 case DP_PEER_DEVICE_DP_LEGACY_CONV:
963 case DP_PEER_DEVICE_SST_SINK:
964 /* add i2c over sideband */
965 ret = drm_dp_mst_register_i2c_bus(&port->aux);
966 break;
967 case DP_PEER_DEVICE_MST_BRANCHING:
968 lct = drm_dp_calculate_rad(port, rad);
969
 970 port->mstb = drm_dp_add_mst_branch_device(lct, rad);
 971 if (port->mstb) {
 972 port->mstb->mgr = port->mgr;
 973 port->mstb->port_parent = port;
 974 send_link = true;
 }
 975 break;
976 }
977 return send_link;
978}
979
980static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
981 struct drm_dp_mst_port *port)
982{
983 int ret;
984 if (port->dpcd_rev >= 0x12) {
985 port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
986 if (!port->guid_valid) {
987 ret = drm_dp_send_dpcd_write(mstb->mgr,
988 port,
989 DP_GUID,
990 16, port->guid);
991 port->guid_valid = true;
992 }
993 }
994}
995
996static void build_mst_prop_path(struct drm_dp_mst_port *port,
997 struct drm_dp_mst_branch *mstb,
998 char *proppath)
999{
1000 int i;
1001 char temp[8];
1002 snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
1003 for (i = 0; i < (mstb->lct - 1); i++) {
1004 int shift = (i % 2) ? 0 : 4;
 1005 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
1006 snprintf(temp, 8, "-%d", port_num);
1007 strncat(proppath, temp, 255);
1008 }
1009 snprintf(temp, 8, "-%d", port->port_num);
1010 strncat(proppath, temp, 255);
1011}
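/*
 * Example output (values illustrative): for a manager whose conn_base_id
 * is 4, a branch at lct 3 with rad[0] = 0x21 and a port numbered 8, the
 * property path comes out as "mst:4-2-1-8".
 */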
1012
1013static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1014 struct device *dev,
1015 struct drm_dp_link_addr_reply_port *port_msg)
1016{
1017 struct drm_dp_mst_port *port;
1018 bool ret;
1019 bool created = false;
1020 int old_pdt = 0;
1021 int old_ddps = 0;
1022 port = drm_dp_get_port(mstb, port_msg->port_number);
1023 if (!port) {
1024 port = kzalloc(sizeof(*port), GFP_KERNEL);
1025 if (!port)
1026 return;
1027 kref_init(&port->kref);
1028 port->parent = mstb;
1029 port->port_num = port_msg->port_number;
1030 port->mgr = mstb->mgr;
1031 port->aux.name = "DPMST";
1032 port->aux.dev = dev;
1033 created = true;
1034 } else {
1035 old_pdt = port->pdt;
1036 old_ddps = port->ddps;
1037 }
1038
1039 port->pdt = port_msg->peer_device_type;
1040 port->input = port_msg->input_port;
1041 port->mcs = port_msg->mcs;
1042 port->ddps = port_msg->ddps;
1043 port->ldps = port_msg->legacy_device_plug_status;
1044 port->dpcd_rev = port_msg->dpcd_revision;
1045 port->num_sdp_streams = port_msg->num_sdp_streams;
1046 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1047 memcpy(port->guid, port_msg->peer_guid, 16);
1048
1049 /* manage mstb port lists with mgr lock - take a reference
1050 for this list */
1051 if (created) {
1052 mutex_lock(&mstb->mgr->lock);
1053 kref_get(&port->kref);
1054 list_add(&port->next, &mstb->ports);
1055 mutex_unlock(&mstb->mgr->lock);
1056 }
1057
1058 if (old_ddps != port->ddps) {
1059 if (port->ddps) {
1060 drm_dp_check_port_guid(mstb, port);
1061 if (!port->input)
1062 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
1063 } else {
1064 port->guid_valid = false;
1065 port->available_pbn = 0;
1066 }
1067 }
1068
1069 if (old_pdt != port->pdt && !port->input) {
1070 drm_dp_port_teardown_pdt(port, old_pdt);
1071
1072 ret = drm_dp_port_setup_pdt(port);
1073 if (ret == true) {
1074 drm_dp_send_link_address(mstb->mgr, port->mstb);
1075 port->mstb->link_address_sent = true;
1076 }
1077 }
1078
1079 if (created && !port->input) {
1080 char proppath[255];
1081 build_mst_prop_path(port, mstb, proppath);
1082 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1083 }
1084
1085 /* put reference to this port */
1086 drm_dp_put_port(port);
1087}
1088
1089static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1090 struct drm_dp_connection_status_notify *conn_stat)
1091{
1092 struct drm_dp_mst_port *port;
1093 int old_pdt;
1094 int old_ddps;
1095 bool dowork = false;
1096 port = drm_dp_get_port(mstb, conn_stat->port_number);
1097 if (!port)
1098 return;
1099
1100 old_ddps = port->ddps;
1101 old_pdt = port->pdt;
1102 port->pdt = conn_stat->peer_device_type;
1103 port->mcs = conn_stat->message_capability_status;
1104 port->ldps = conn_stat->legacy_device_plug_status;
1105 port->ddps = conn_stat->displayport_device_plug_status;
1106
1107 if (old_ddps != port->ddps) {
1108 if (port->ddps) {
1109 drm_dp_check_port_guid(mstb, port);
1110 dowork = true;
1111 } else {
1112 port->guid_valid = false;
1113 port->available_pbn = 0;
1114 }
1115 }
1116 if (old_pdt != port->pdt && !port->input) {
1117 drm_dp_port_teardown_pdt(port, old_pdt);
1118
1119 if (drm_dp_port_setup_pdt(port))
1120 dowork = true;
1121 }
1122
1123 drm_dp_put_port(port);
1124 if (dowork)
1125 queue_work(system_long_wq, &mstb->mgr->work);
1126
1127}
1128
1129static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
1130 u8 lct, u8 *rad)
1131{
1132 struct drm_dp_mst_branch *mstb;
1133 struct drm_dp_mst_port *port;
1134 int i;
1135 /* find the port by iterating down */
1136 mstb = mgr->mst_primary;
1137
1138 for (i = 0; i < lct - 1; i++) {
1139 int shift = (i % 2) ? 0 : 4;
 1140 int port_num = (rad[i / 2] >> shift) & 0xf;
1141
1142 list_for_each_entry(port, &mstb->ports, next) {
1143 if (port->port_num == port_num) {
1144 if (!port->mstb) {
1145 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
1146 return NULL;
1147 }
1148
1149 mstb = port->mstb;
1150 break;
1151 }
1152 }
1153 }
1154 kref_get(&mstb->kref);
1155 return mstb;
1156}
1157
1158static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1159 struct drm_dp_mst_branch *mstb)
1160{
1161 struct drm_dp_mst_port *port;
1162
1163 if (!mstb->link_address_sent) {
1164 drm_dp_send_link_address(mgr, mstb);
1165 mstb->link_address_sent = true;
1166 }
1167 list_for_each_entry(port, &mstb->ports, next) {
1168 if (port->input)
1169 continue;
1170
1171 if (!port->ddps)
1172 continue;
1173
1174 if (!port->available_pbn)
1175 drm_dp_send_enum_path_resources(mgr, mstb, port);
1176
1177 if (port->mstb)
1178 drm_dp_check_and_send_link_address(mgr, port->mstb);
1179 }
1180}
1181
1182static void drm_dp_mst_link_probe_work(struct work_struct *work)
1183{
1184 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
1185
1186 drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
1187
1188}
1189
1190static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
1191 u8 *guid)
1192{
1193 static u8 zero_guid[16];
1194
1195 if (!memcmp(guid, zero_guid, 16)) {
1196 u64 salt = get_jiffies_64();
1197 memcpy(&guid[0], &salt, sizeof(u64));
1198 memcpy(&guid[8], &salt, sizeof(u64));
1199 return false;
1200 }
1201 return true;
1202}
1203
1204#if 0
1205static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
1206{
1207 struct drm_dp_sideband_msg_req_body req;
1208
1209 req.req_type = DP_REMOTE_DPCD_READ;
1210 req.u.dpcd_read.port_number = port_num;
1211 req.u.dpcd_read.dpcd_address = offset;
1212 req.u.dpcd_read.num_bytes = num_bytes;
1213 drm_dp_encode_sideband_req(&req, msg);
1214
1215 return 0;
1216}
1217#endif
1218
1219static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
1220 bool up, u8 *msg, int len)
1221{
1222 int ret;
1223 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
1224 int tosend, total, offset;
1225 int retries = 0;
1226
1227retry:
1228 total = len;
1229 offset = 0;
1230 do {
1231 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
1232
1233 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
1234 &msg[offset],
1235 tosend);
1236 if (ret != tosend) {
1237 if (ret == -EIO && retries < 5) {
1238 retries++;
1239 goto retry;
1240 }
1241 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1242 WARN(1, "fail\n");
1243
1244 return -EIO;
1245 }
1246 offset += tosend;
1247 total -= tosend;
1248 } while (total > 0);
1249 return 0;
1250}
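/*
 * Transfer-size sketch: the message box is written in pieces of at most 16
 * bytes (capped further by the device's max DPCD transaction size), so a
 * 44-byte chunk goes out as 16 + 16 + 12 byte writes at advancing offsets.
 * A short write reported as -EIO is retried up to five times before the
 * whole send fails.
 */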
1251
1252static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1253 struct drm_dp_sideband_msg_tx *txmsg)
1254{
1255 struct drm_dp_mst_branch *mstb = txmsg->dst;
1256
1257 /* both msg slots are full */
1258 if (txmsg->seqno == -1) {
1259 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
1260 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
1261 return -EAGAIN;
1262 }
1263 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
1264 txmsg->seqno = mstb->last_seqno;
1265 mstb->last_seqno ^= 1;
1266 } else if (mstb->tx_slots[0] == NULL)
1267 txmsg->seqno = 0;
1268 else
1269 txmsg->seqno = 1;
1270 mstb->tx_slots[txmsg->seqno] = txmsg;
1271 }
1272 hdr->broadcast = 0;
1273 hdr->path_msg = txmsg->path_msg;
1274 hdr->lct = mstb->lct;
1275 hdr->lcr = mstb->lct - 1;
1276 if (mstb->lct > 1)
1277 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
1278 hdr->seqno = txmsg->seqno;
1279 return 0;
1280}
1281/*
1282 * process a single block of the next message in the sideband queue
1283 */
1284static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1285 struct drm_dp_sideband_msg_tx *txmsg,
1286 bool up)
1287{
1288 u8 chunk[48];
1289 struct drm_dp_sideband_msg_hdr hdr;
1290 int len, space, idx, tosend;
1291 int ret;
1292
1293 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
1294
1295 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
1296 txmsg->seqno = -1;
1297 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
1298 }
1299
1300 /* make hdr from dst mst - for replies use seqno
1301 otherwise assign one */
1302 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
1303 if (ret < 0)
1304 return ret;
1305
1306 /* amount left to send in this message */
1307 len = txmsg->cur_len - txmsg->cur_offset;
1308
 1309 /* 48 bytes per sideband msg transaction, minus 1 byte for the data CRC and the header bytes */
1310 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
1311
1312 tosend = min(len, space);
1313 if (len == txmsg->cur_len)
1314 hdr.somt = 1;
1315 if (space >= len)
1316 hdr.eomt = 1;
1317
1318
1319 hdr.msg_len = tosend + 1;
1320 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
1321 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
1322 /* add crc at end */
1323 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
1324 idx += tosend + 1;
1325
1326 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
1327 if (ret) {
1328 DRM_DEBUG_KMS("sideband msg failed to send\n");
1329 return ret;
1330 }
1331
1332 txmsg->cur_offset += tosend;
1333 if (txmsg->cur_offset == txmsg->cur_len) {
1334 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
1335 return 1;
1336 }
1337 return 0;
1338}
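/*
 * Chunking arithmetic (illustrative): a 48-byte sideband transaction minus
 * one trailing data CRC byte and a 3-byte lct-1 header leaves 44 payload
 * bytes per chunk. A 60-byte request body therefore goes out as two
 * chunks: 44 bytes with SOMT set, then the remaining 16 with EOMT set.
 * The function returns 1 once cur_offset reaches cur_len.
 */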
1339
1340/* must be called holding qlock */
1341static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1342{
1343 struct drm_dp_sideband_msg_tx *txmsg;
1344 int ret;
1345
1346 /* construct a chunk from the first msg in the tx_msg queue */
1347 if (list_empty(&mgr->tx_msg_downq)) {
1348 mgr->tx_down_in_progress = false;
1349 return;
1350 }
1351 mgr->tx_down_in_progress = true;
1352
1353 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
1354 ret = process_single_tx_qlock(mgr, txmsg, false);
1355 if (ret == 1) {
1356 /* txmsg is sent it should be in the slots now */
1357 list_del(&txmsg->next);
1358 } else if (ret) {
1359 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1360 list_del(&txmsg->next);
1361 if (txmsg->seqno != -1)
1362 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1363 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1364 wake_up(&mgr->tx_waitq);
1365 }
1366 if (list_empty(&mgr->tx_msg_downq)) {
1367 mgr->tx_down_in_progress = false;
1368 return;
1369 }
1370}
1371
1372/* called holding qlock */
1373static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1374{
1375 struct drm_dp_sideband_msg_tx *txmsg;
1376 int ret;
1377
1378 /* construct a chunk from the first msg in the tx_msg queue */
1379 if (list_empty(&mgr->tx_msg_upq)) {
1380 mgr->tx_up_in_progress = false;
1381 return;
1382 }
1383
1384 txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
1385 ret = process_single_tx_qlock(mgr, txmsg, true);
1386 if (ret == 1) {
1387 /* up txmsgs aren't put in slots - so free after we send it */
1388 list_del(&txmsg->next);
1389 kfree(txmsg);
1390 } else if (ret)
1391 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1392 mgr->tx_up_in_progress = true;
1393}
1394
1395static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1396 struct drm_dp_sideband_msg_tx *txmsg)
1397{
1398 mutex_lock(&mgr->qlock);
1399 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
1400 if (!mgr->tx_down_in_progress)
1401 process_single_down_tx_qlock(mgr);
1402 mutex_unlock(&mgr->qlock);
1403}
1404
1405static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1406 struct drm_dp_mst_branch *mstb)
1407{
1408 int len;
1409 struct drm_dp_sideband_msg_tx *txmsg;
1410 int ret;
1411
1412 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1413 if (!txmsg)
1414 return -ENOMEM;
1415
1416 txmsg->dst = mstb;
1417 len = build_link_address(txmsg);
1418
1419 drm_dp_queue_down_tx(mgr, txmsg);
1420
1421 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1422 if (ret > 0) {
1423 int i;
1424
1425 if (txmsg->reply.reply_type == 1)
1426 DRM_DEBUG_KMS("link address nak received\n");
1427 else {
1428 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
1429 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1430 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
1431 txmsg->reply.u.link_addr.ports[i].input_port,
1432 txmsg->reply.u.link_addr.ports[i].peer_device_type,
1433 txmsg->reply.u.link_addr.ports[i].port_number,
1434 txmsg->reply.u.link_addr.ports[i].dpcd_revision,
1435 txmsg->reply.u.link_addr.ports[i].mcs,
1436 txmsg->reply.u.link_addr.ports[i].ddps,
1437 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
1438 txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1439 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
1440 }
1441 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1442 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1443 }
1444 (*mgr->cbs->hotplug)(mgr);
1445 }
1446 } else
1447 DRM_DEBUG_KMS("link address failed %d\n", ret);
1448
1449 kfree(txmsg);
1450 return 0;
1451}
1452
1453static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1454 struct drm_dp_mst_branch *mstb,
1455 struct drm_dp_mst_port *port)
1456{
1457 int len;
1458 struct drm_dp_sideband_msg_tx *txmsg;
1459 int ret;
1460
1461 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1462 if (!txmsg)
1463 return -ENOMEM;
1464
1465 txmsg->dst = mstb;
1466 len = build_enum_path_resources(txmsg, port->port_num);
1467
1468 drm_dp_queue_down_tx(mgr, txmsg);
1469
1470 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1471 if (ret > 0) {
1472 if (txmsg->reply.reply_type == 1)
1473 DRM_DEBUG_KMS("enum path resources nak received\n");
1474 else {
1475 if (port->port_num != txmsg->reply.u.path_resources.port_number)
1476 DRM_ERROR("got incorrect port in response\n");
1477 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
1478 txmsg->reply.u.path_resources.avail_payload_bw_number);
1479 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
1480 }
1481 }
1482
1483 kfree(txmsg);
1484 return 0;
1485}
1486
1487static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1488 struct drm_dp_mst_port *port,
1489 int id,
1490 int pbn)
1491{
1492 struct drm_dp_sideband_msg_tx *txmsg;
1493 struct drm_dp_mst_branch *mstb;
1494 int len, ret;
1495
1496 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1497 if (!mstb)
1498 return -EINVAL;
1499
1500 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1501 if (!txmsg) {
1502 ret = -ENOMEM;
1503 goto fail_put;
1504 }
1505
1506 txmsg->dst = mstb;
1507 len = build_allocate_payload(txmsg, port->port_num,
1508 id,
1509 pbn);
1510
1511 drm_dp_queue_down_tx(mgr, txmsg);
1512
1513 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1514 if (ret > 0) {
1515 if (txmsg->reply.reply_type == 1) {
1516 ret = -EINVAL;
1517 } else
1518 ret = 0;
1519 }
1520 kfree(txmsg);
1521fail_put:
1522 drm_dp_put_mst_branch_device(mstb);
1523 return ret;
1524}
1525
1526static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1527 int id,
1528 struct drm_dp_payload *payload)
1529{
1530 int ret;
1531
1532 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
1533 if (ret < 0) {
1534 payload->payload_state = 0;
1535 return ret;
1536 }
1537 payload->payload_state = DP_PAYLOAD_LOCAL;
1538 return 0;
1539}
1540
1541static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1542 struct drm_dp_mst_port *port,
1543 int id,
1544 struct drm_dp_payload *payload)
1545{
1546 int ret;
1547 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
1548 if (ret < 0)
1549 return ret;
1550 payload->payload_state = DP_PAYLOAD_REMOTE;
1551 return ret;
1552}
1553
1554static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1555 struct drm_dp_mst_port *port,
1556 int id,
1557 struct drm_dp_payload *payload)
1558{
1559 DRM_DEBUG_KMS("\n");
 1560 /* it's okay for these to fail */
1561 if (port) {
1562 drm_dp_payload_send_msg(mgr, port, id, 0);
1563 }
1564
1565 drm_dp_dpcd_write_payload(mgr, id, payload);
1566 payload->payload_state = 0;
1567 return 0;
1568}
1569
1570static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1571 int id,
1572 struct drm_dp_payload *payload)
1573{
1574 payload->payload_state = 0;
1575 return 0;
1576}
1577
1578/**
1579 * drm_dp_update_payload_part1() - Execute payload update part 1
1580 * @mgr: manager to use.
1581 *
1582 * This iterates over all proposed virtual channels, and tries to
1583 * allocate space in the link for them. For 0->slots transitions,
1584 * this step just writes the VCPI to the MST device. For slots->0
1585 * transitions, this writes the updated VCPIs and removes the
1586 * remote VC payloads.
1587 *
 1588 * After calling this, the driver should generate ACT and payload
1589 * packets.
1590 */
1591int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1592{
1593 int i;
1594 int cur_slots = 1;
1595 struct drm_dp_payload req_payload;
1596 struct drm_dp_mst_port *port;
1597
1598 mutex_lock(&mgr->payload_lock);
1599 for (i = 0; i < mgr->max_payloads; i++) {
1600 /* solve the current payloads - compare to the hw ones
1601 - update the hw view */
1602 req_payload.start_slot = cur_slots;
1603 if (mgr->proposed_vcpis[i]) {
1604 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1605 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1606 } else {
1607 port = NULL;
1608 req_payload.num_slots = 0;
1609 }
1610 /* work out what is required to happen with this payload */
1611 if (mgr->payloads[i].start_slot != req_payload.start_slot ||
1612 mgr->payloads[i].num_slots != req_payload.num_slots) {
1613
1614 /* need to push an update for this payload */
1615 if (req_payload.num_slots) {
1616 drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
1617 mgr->payloads[i].num_slots = req_payload.num_slots;
1618 } else if (mgr->payloads[i].num_slots) {
1619 mgr->payloads[i].num_slots = 0;
1620 drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
1621 req_payload.payload_state = mgr->payloads[i].payload_state;
1622 } else
1623 req_payload.payload_state = 0;
1624
1625 mgr->payloads[i].start_slot = req_payload.start_slot;
1626 mgr->payloads[i].payload_state = req_payload.payload_state;
1627 }
1628 cur_slots += req_payload.num_slots;
1629 }
1630 mutex_unlock(&mgr->payload_lock);
1631
1632 return 0;
1633}
1634EXPORT_SYMBOL(drm_dp_update_payload_part1);
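/*
 * Caller ordering sketch (the driver-side helper named here is purely
 * illustrative): after allocating VCPIs for the new state, a driver runs
 * part 1, triggers the payload table update and waits for ACT handling in
 * hardware, and only then runs part 2 to send the remote ALLOCATE_PAYLOAD
 * messages:
 *
 *	drm_dp_update_payload_part1(mgr);
 *	my_driver_send_and_wait_for_act(dev);
 *	drm_dp_update_payload_part2(mgr);
 */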
1635
1636/**
1637 * drm_dp_update_payload_part2() - Execute payload update part 2
1638 * @mgr: manager to use.
1639 *
1640 * This iterates over all proposed virtual channels, and tries to
1641 * allocate space in the link for them. For 0->slots transitions,
1642 * this step writes the remote VC payload commands. For slots->0
1643 * this just resets some internal state.
1644 */
1645int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
1646{
1647 struct drm_dp_mst_port *port;
1648 int i;
1649 int ret = 0;
1650 mutex_lock(&mgr->payload_lock);
1651 for (i = 0; i < mgr->max_payloads; i++) {
1652
1653 if (!mgr->proposed_vcpis[i])
1654 continue;
1655
1656 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1657
1658 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
1659 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
1660 ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
1661 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1662 ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
1663 }
1664 if (ret) {
1665 mutex_unlock(&mgr->payload_lock);
1666 return ret;
1667 }
1668 }
1669 mutex_unlock(&mgr->payload_lock);
1670 return 0;
1671}
1672EXPORT_SYMBOL(drm_dp_update_payload_part2);
1673
1674#if 0 /* unused as of yet */
1675static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
1676 struct drm_dp_mst_port *port,
1677 int offset, int size)
1678{
1679 int len;
1680 struct drm_dp_sideband_msg_tx *txmsg;
1681
1682 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1683 if (!txmsg)
1684 return -ENOMEM;
1685
1686 len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1687 txmsg->dst = port->parent;
1688
1689 drm_dp_queue_down_tx(mgr, txmsg);
1690
1691 return 0;
1692}
1693#endif
1694
1695static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
1696 struct drm_dp_mst_port *port,
1697 int offset, int size, u8 *bytes)
1698{
1699 int len;
1700 int ret;
1701 struct drm_dp_sideband_msg_tx *txmsg;
1702 struct drm_dp_mst_branch *mstb;
1703
1704 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1705 if (!mstb)
1706 return -EINVAL;
1707
1708 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1709 if (!txmsg) {
1710 ret = -ENOMEM;
1711 goto fail_put;
1712 }
1713
1714 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
1715 txmsg->dst = mstb;
1716
1717 drm_dp_queue_down_tx(mgr, txmsg);
1718
1719 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1720 if (ret > 0) {
1721 if (txmsg->reply.reply_type == 1) {
1722 ret = -EINVAL;
1723 } else
1724 ret = 0;
1725 }
1726 kfree(txmsg);
1727fail_put:
1728 drm_dp_put_mst_branch_device(mstb);
1729 return ret;
1730}
1731
1732static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
1733{
1734 struct drm_dp_sideband_msg_reply_body reply;
1735
1736 reply.reply_type = 1;
1737 reply.req_type = req_type;
1738 drm_dp_encode_sideband_reply(&reply, msg);
1739 return 0;
1740}
1741
1742static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1743 struct drm_dp_mst_branch *mstb,
1744 int req_type, int seqno, bool broadcast)
1745{
1746 struct drm_dp_sideband_msg_tx *txmsg;
1747
1748 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1749 if (!txmsg)
1750 return -ENOMEM;
1751
1752 txmsg->dst = mstb;
1753 txmsg->seqno = seqno;
1754 drm_dp_encode_up_ack_reply(txmsg, req_type);
1755
1756 mutex_lock(&mgr->qlock);
1757 list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
1758 if (!mgr->tx_up_in_progress) {
1759 process_single_up_tx_qlock(mgr);
1760 }
1761 mutex_unlock(&mgr->qlock);
1762 return 0;
1763}
1764
1765static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
1766{
1767 switch (dp_link_bw) {
1768 case DP_LINK_BW_1_62:
1769 return 3 * dp_link_count;
1770 case DP_LINK_BW_2_7:
1771 return 5 * dp_link_count;
1772 case DP_LINK_BW_5_4:
1773 return 10 * dp_link_count;
1774 }
1775 return 0;
1776}
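/*
 * Worked numbers: the value returned here becomes mgr->pbn_div in
 * drm_dp_mst_topology_mgr_set_mst() below. A 5.4 GHz link trained at 4
 * lanes gives 10 * 4 = 40 PBN per timeslot, so with total_pbn fixed at
 * 2560 the manager ends up with DIV_ROUND_UP(2560, 40) = 64 total slots,
 * matching the 64 timeslots of an MST main stream frame.
 */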
1777
1778/**
1779 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
1780 * @mgr: manager to set state for
1781 * @mst_state: true to enable MST on this connector - false to disable.
1782 *
1783 * This is called by the driver when it detects an MST capable device plugged
1784 * into a DP MST capable port, or when a DP MST capable device is unplugged.
1785 */
1786int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
1787{
1788 int ret = 0;
1789 struct drm_dp_mst_branch *mstb = NULL;
1790
1791 mutex_lock(&mgr->lock);
1792 if (mst_state == mgr->mst_state)
1793 goto out_unlock;
1794
1795 mgr->mst_state = mst_state;
1796 /* set the device into MST mode */
1797 if (mst_state) {
1798 WARN_ON(mgr->mst_primary);
1799
1800 /* get dpcd info */
1801 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1802 if (ret != DP_RECEIVER_CAP_SIZE) {
1803 DRM_DEBUG_KMS("failed to read DPCD\n");
1804 goto out_unlock;
1805 }
1806
1807 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
1808 mgr->total_pbn = 2560;
1809 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
1810 mgr->avail_slots = mgr->total_slots;
1811
1812 /* add initial branch device at LCT 1 */
1813 mstb = drm_dp_add_mst_branch_device(1, NULL);
1814 if (mstb == NULL) {
1815 ret = -ENOMEM;
1816 goto out_unlock;
1817 }
1818 mstb->mgr = mgr;
1819
1820 /* give this the main reference */
1821 mgr->mst_primary = mstb;
1822 kref_get(&mgr->mst_primary->kref);
1823
1824 {
1825 struct drm_dp_payload reset_pay;
1826 reset_pay.start_slot = 0;
1827 reset_pay.num_slots = 0x3f;
1828 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
1829 }
1830
1831 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1832 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1833 if (ret < 0) {
1834 goto out_unlock;
1835 }
1836
1837
1838 /* sort out guid */
1839 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
1840 if (ret != 16) {
1841 DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
1842 goto out_unlock;
1843 }
1844
1845 mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
1846 if (!mgr->guid_valid) {
1847 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
1848 mgr->guid_valid = true;
1849 }
1850
1851 queue_work(system_long_wq, &mgr->work);
1852
1853 ret = 0;
1854 } else {
1855 /* disable MST on the device */
1856 mstb = mgr->mst_primary;
1857 mgr->mst_primary = NULL;
1858 /* this can fail if the device is gone */
1859 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
1860 ret = 0;
1861 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
1862 mgr->payload_mask = 0;
1863 set_bit(0, &mgr->payload_mask);
1864 }
1865
1866out_unlock:
1867 mutex_unlock(&mgr->lock);
1868 if (mstb)
1869 drm_dp_put_mst_branch_device(mstb);
1870 return ret;
1871
1872}
1873EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
1874
1875/**
1876 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
1877 * @mgr: manager to suspend
1878 *
1879 * This function tells the MST device that we can't handle UP messages
1880 * anymore. This should stop it from sending any since we are suspended.
1881 */
1882void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
1883{
1884 mutex_lock(&mgr->lock);
1885 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1886 DP_MST_EN | DP_UPSTREAM_IS_SRC);
1887 mutex_unlock(&mgr->lock);
1888}
1889EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
1890
1891/**
1892 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
1893 * @mgr: manager to resume
1894 *
 1895 * This will fetch the DPCD and see if the device is still there;
 1896 * if it is, it will rewrite the MSTM control bits and return.
 1897 *
 1898 * If the device fails this returns -1, and the driver should do
 1899 * a full MST reprobe, in case we were undocked.
1900 */
1901int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
1902{
1903 int ret = 0;
1904
1905 mutex_lock(&mgr->lock);
1906
1907 if (mgr->mst_primary) {
1908 int sret;
1909 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1910 if (sret != DP_RECEIVER_CAP_SIZE) {
1911 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
1912 ret = -1;
1913 goto out_unlock;
1914 }
1915
1916 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1917 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1918 if (ret < 0) {
1919 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
1920 ret = -1;
1921 goto out_unlock;
1922 }
1923 ret = 0;
1924 } else
1925 ret = -1;
1926
1927out_unlock:
1928 mutex_unlock(&mgr->lock);
1929 return ret;
1930}
1931EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
1932
1933static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
1934{
1935 int len;
1936 u8 replyblock[32];
1937 int replylen, origlen, curreply;
1938 int ret;
1939 struct drm_dp_sideband_msg_rx *msg;
1940 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
1941 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
1942
1943 len = min(mgr->max_dpcd_transaction_bytes, 16);
1944 ret = drm_dp_dpcd_read(mgr->aux, basereg,
1945 replyblock, len);
1946 if (ret != len) {
1947 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
1948 return;
1949 }
1950 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
1951 if (!ret) {
1952 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
1953 return;
1954 }
1955 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
1956
1957 origlen = replylen;
1958 replylen -= len;
1959 curreply = len;
1960 while (replylen > 0) {
1961 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
1962 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
1963 replyblock, len);
1964 if (ret != len) {
1965 DRM_DEBUG_KMS("failed to read a chunk\n");
1966 }
1967 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
1968 if (ret == false)
1969 DRM_DEBUG_KMS("failed to build sideband msg\n");
1970 curreply += len;
1971 replylen -= len;
1972 }
1973}
1974
1975static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
1976{
1977 int ret = 0;
1978
1979 drm_dp_get_one_sb_msg(mgr, false);
1980
1981 if (mgr->down_rep_recv.have_eomt) {
1982 struct drm_dp_sideband_msg_tx *txmsg;
1983 struct drm_dp_mst_branch *mstb;
1984 int slot = -1;
1985 mstb = drm_dp_get_mst_branch_device(mgr,
1986 mgr->down_rep_recv.initial_hdr.lct,
1987 mgr->down_rep_recv.initial_hdr.rad);
1988
1989 if (!mstb) {
1990 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
1991 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
1992 return 0;
1993 }
1994
1995 /* find the message */
1996 slot = mgr->down_rep_recv.initial_hdr.seqno;
1997 mutex_lock(&mgr->qlock);
1998 txmsg = mstb->tx_slots[slot];
1999 /* remove from slots */
2000 mutex_unlock(&mgr->qlock);
2001
2002 if (!txmsg) {
2003 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2004 mstb,
2005 mgr->down_rep_recv.initial_hdr.seqno,
2006 mgr->down_rep_recv.initial_hdr.lct,
2007 mgr->down_rep_recv.initial_hdr.rad[0],
2008 mgr->down_rep_recv.msg[0]);
2009 drm_dp_put_mst_branch_device(mstb);
2010 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2011 return 0;
2012 }
2013
2014 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2015 if (txmsg->reply.reply_type == 1) {
2016 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
2017 }
2018
2019 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2020 drm_dp_put_mst_branch_device(mstb);
2021
2022 mutex_lock(&mgr->qlock);
2023 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2024 mstb->tx_slots[slot] = NULL;
2025 mutex_unlock(&mgr->qlock);
2026
2027 wake_up(&mgr->tx_waitq);
2028 }
2029 return ret;
2030}
2031
2032static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2033{
2034 int ret = 0;
2035 drm_dp_get_one_sb_msg(mgr, true);
2036
2037 if (mgr->up_req_recv.have_eomt) {
2038 struct drm_dp_sideband_msg_req_body msg;
2039 struct drm_dp_mst_branch *mstb;
2040 bool seqno;
2041 mstb = drm_dp_get_mst_branch_device(mgr,
2042 mgr->up_req_recv.initial_hdr.lct,
2043 mgr->up_req_recv.initial_hdr.rad);
2044 if (!mstb) {
2045 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2046 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2047 return 0;
2048 }
2049
2050 seqno = mgr->up_req_recv.initial_hdr.seqno;
2051 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2052
2053 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2054 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2055 drm_dp_update_port(mstb, &msg.u.conn_stat);
2056 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2057 (*mgr->cbs->hotplug)(mgr);
2058
2059 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2060 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2061 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2062 }
2063
2064 drm_dp_put_mst_branch_device(mstb);
2065 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2066 }
2067 return ret;
2068}
2069
2070/**
2071 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2072 * @mgr: manager to notify irq for.
2073 * @esi: 4 bytes from SINK_COUNT_ESI
2074 *
2075 * This should be called from the driver when it detects a short IRQ,
2076 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2077 * topology manager will process the sideband messages received as a result
2078 * of this.
2079 */
2080int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
2081{
2082 int ret = 0;
2083 int sc;
2084 *handled = false;
2085 sc = esi[0] & 0x3f;
2086
2087 if (sc != mgr->sink_count) {
2088 mgr->sink_count = sc;
2089 *handled = true;
2090 }
2091
2092 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2093 ret = drm_dp_mst_handle_down_rep(mgr);
2094 *handled = true;
2095 }
2096
2097 if (esi[1] & DP_UP_REQ_MSG_RDY) {
2098 ret |= drm_dp_mst_handle_up_req(mgr);
2099 *handled = true;
2100 }
2101
2102 drm_dp_mst_kick_tx(mgr);
2103 return ret;
2104}
2105EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
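/*
 * Usage sketch for a driver's short-pulse handler (the read/ack pattern is
 * illustrative; the register offsets are from the DP spec):
 *
 *	u8 esi[4];
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */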
2106
2107/**
2108 * drm_dp_mst_detect_port() - get connection status for an MST port
2109 * @mgr: manager for this port
2110 * @port: unverified pointer to a port
2111 *
2112 * This returns the current connection state for a port. It validates the
 2113 * port pointer still exists so the caller doesn't require a reference.
2114 */
2115enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2116{
2117 enum drm_connector_status status = connector_status_disconnected;
2118
2119 /* we need to search for the port in the mgr in case it's gone */
2120 port = drm_dp_get_validated_port_ref(mgr, port);
2121 if (!port)
2122 return connector_status_disconnected;
2123
2124 if (!port->ddps)
2125 goto out;
2126
2127 switch (port->pdt) {
2128 case DP_PEER_DEVICE_NONE:
2129 case DP_PEER_DEVICE_MST_BRANCHING:
2130 break;
2131
2132 case DP_PEER_DEVICE_SST_SINK:
2133 status = connector_status_connected;
2134 break;
2135 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2136 if (port->ldps)
2137 status = connector_status_connected;
2138 break;
2139 }
2140out:
2141 drm_dp_put_port(port);
2142 return status;
2143}
2144EXPORT_SYMBOL(drm_dp_mst_detect_port);
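Since the helper revalidates the port itself, a connector's .detect() hook can forward to it directly. A minimal sketch (struct example_connector and its mst_mgr/mst_port fields are assumptions, not part of this patch):

	static enum drm_connector_status
	example_mst_connector_detect(struct drm_connector *connector, bool force)
	{
		struct example_connector *ec = to_example_connector(connector);

		/* stale port pointers are tolerated; see the validation above */
		return drm_dp_mst_detect_port(ec->mst_mgr, ec->mst_port);
	}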
2145
2146/**
2147 * drm_dp_mst_get_edid() - get EDID for an MST port
2148 * @connector: toplevel connector to get EDID for
2149 * @mgr: manager for this port
2150 * @port: unverified pointer to a port.
2151 *
2152 * This returns an EDID for the port connected to a connector.
2153 * It validates the pointer still exists so the caller doesn't require a
2154 * reference.
2155 */
2156struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2157{
2158 struct edid *edid = NULL;
2159
2160 /* we need to search for the port in the mgr in case it's gone */
2161 port = drm_dp_get_validated_port_ref(mgr, port);
2162 if (!port)
2163 return NULL;
2164
2165 edid = drm_get_edid(connector, &port->aux.ddc);
2166 drm_dp_put_port(port);
2167 return edid;
2168}
2169EXPORT_SYMBOL(drm_dp_mst_get_edid);
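The matching .get_modes() implementation is equally small; a sketch under the same assumptions (drm_mode_connector_update_edid_property() and drm_add_edid_modes() are the standard DRM EDID helpers):

	static int example_mst_connector_get_modes(struct drm_connector *connector)
	{
		struct example_connector *ec = to_example_connector(connector);
		struct edid *edid;
		int count;

		edid = drm_dp_mst_get_edid(connector, ec->mst_mgr, ec->mst_port);
		if (!edid)
			return 0;

		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
		return count;
	}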
2170
2171/**
2172 * drm_dp_find_vcpi_slots() - find slots for this PBN value
2173 * @mgr: manager to use
2174 * @pbn: payload bandwidth to convert into slots.
2175 */
2176int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
2177 int pbn)
2178{
2179 int num_slots;
2180
2181 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2182
2183 if (num_slots > mgr->avail_slots)
2184 return -ENOSPC;
2185 return num_slots;
2186}
2187EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
2188
2189static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2190 struct drm_dp_vcpi *vcpi, int pbn)
2191{
2192 int num_slots;
2193 int ret;
2194
2195 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2196
2197 if (num_slots > mgr->avail_slots)
2198 return -ENOSPC;
2199
2200 vcpi->pbn = pbn;
2201 vcpi->aligned_pbn = num_slots * mgr->pbn_div;
2202 vcpi->num_slots = num_slots;
2203
2204 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
2205 if (ret < 0)
2206 return ret;
2207 return 0;
2208}
2209
2210/**
2211 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
2212 * @mgr: manager for this port
2213 * @port: port to allocate a virtual channel for.
2214 * @pbn: payload bandwidth number to request
2215 * @slots: returned number of slots for this PBN.
2216 */
2217bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
2218{
2219 int ret;
2220
2221 port = drm_dp_get_validated_port_ref(mgr, port);
2222 if (!port)
2223 return false;
2224
2225 if (port->vcpi.vcpi > 0) {
2226 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
2227 if (pbn == port->vcpi.pbn) {
2228 *slots = port->vcpi.num_slots;
 drm_dp_put_port(port);
2229 return true;
2230 }
2231 }
2232
2233 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
2234 if (ret) {
2235 DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
2236 goto out;
2237 }
2238 DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
2239 *slots = port->vcpi.num_slots;
2240
2241 drm_dp_put_port(port);
2242 return true;
2243out:
 /* also drop the reference taken by drm_dp_get_validated_port_ref() on failure */
 drm_dp_put_port(port);
2244 return false;
2245}
2246EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
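Taken together with drm_dp_calc_pbn_mode() and drm_dp_find_vcpi_slots(), the enable path in a driver looks roughly like this (error handling trimmed; the dp/ec locals and adjusted_mode are assumed driver context):

	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, 24 /* bpp */);
	slots = drm_dp_find_vcpi_slots(&dp->mst_mgr, pbn);
	if (slots < 0)
		return slots;	/* -ENOSPC: the link cannot carry this stream */

	if (!drm_dp_mst_allocate_vcpi(&dp->mst_mgr, ec->mst_port, pbn, &slots))
		return -EINVAL;
	/* now program pbn/slots into the source's MST transport */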
2247
2248/**
2249 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
2250 * @mgr: manager for this port
2251 * @port: unverified pointer to a port.
2252 *
2253 * This just resets the number of slots for the port's VCPI for later programming.
2254 */
2255void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2256{
2257 port = drm_dp_get_validated_port_ref(mgr, port);
2258 if (!port)
2259 return;
2260 port->vcpi.num_slots = 0;
2261 drm_dp_put_port(port);
2262}
2263EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2264
2265/**
2266 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
2267 * @mgr: manager for this port
2268 * @port: unverified port to deallocate vcpi for
2269 */
2270void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2271{
2272 port = drm_dp_get_validated_port_ref(mgr, port);
2273 if (!port)
2274 return;
2275
2276 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2277 port->vcpi.num_slots = 0;
2278 port->vcpi.pbn = 0;
2279 port->vcpi.aligned_pbn = 0;
2280 port->vcpi.vcpi = 0;
2281 drm_dp_put_port(port);
2282}
2283EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
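On the disable side the slot count is zeroed first so the next payload update shrinks the table, and the VCPI is only dropped once the payload is gone. A hedged sketch of that ordering, modelled on the in-tree users (drm_dp_update_payload_part1() is defined earlier in this file; the wait is source-hardware specific):

	/* stream is being turned off */
	drm_dp_mst_reset_vcpi_slots(&dp->mst_mgr, ec->mst_port);
	drm_dp_update_payload_part1(&dp->mst_mgr);
	/* ... wait for the source hw to complete the ACT sequence ... */
	drm_dp_mst_deallocate_vcpi(&dp->mst_mgr, ec->mst_port);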
2284
2285static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
2286 int id, struct drm_dp_payload *payload)
2287{
2288 u8 payload_alloc[3], status;
2289 int ret;
2290 int retries = 0;
2291
2292 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
2293 DP_PAYLOAD_TABLE_UPDATED);
2294
2295 payload_alloc[0] = id;
2296 payload_alloc[1] = payload->start_slot;
2297 payload_alloc[2] = payload->num_slots;
2298
2299 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
2300 if (ret != 3) {
2301 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
2302 goto fail;
2303 }
2304
2305retry:
2306 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2307 if (ret < 0) {
2308 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2309 goto fail;
2310 }
2311
2312 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
2313 retries++;
2314 if (retries < 20) {
2315 usleep_range(10000, 20000);
2316 goto retry;
2317 }
2318 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
2319 ret = -EINVAL;
2320 goto fail;
2321 }
2322 ret = 0;
2323fail:
2324 return ret;
2325}
2326
2327
2328/**
2329 * drm_dp_check_act_status() - Check ACT handled status.
2330 * @mgr: manager to use
2331 *
2332 * Check the payload status bits in the DPCD for ACT handled completion.
2333 */
2334int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
2335{
2336 u8 status;
2337 int ret;
2338 int count = 0;
2339
2340 do {
2341 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2342
2343 if (ret < 0) {
2344 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2345 goto fail;
2346 }
2347
2348 if (status & DP_PAYLOAD_ACT_HANDLED)
2349 break;
2350 count++;
2351 udelay(100);
2352
2353 } while (count < 30);
2354
2355 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
2356 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
2357 ret = -EINVAL;
2358 goto fail;
2359 }
2360 return 0;
2361fail:
2362 return ret;
2363}
2364EXPORT_SYMBOL(drm_dp_check_act_status);
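In the enable direction the usual ordering is: write the payload table (part 1), let the source hardware trigger the allocation-change (ACT) sequence, poll for ACT-handled with this helper, then push the allocation out to the branch devices (part 2). A sketch (drm_dp_update_payload_part1()/part2() are defined earlier in this file; the hardware trigger is driver specific):

	drm_dp_update_payload_part1(&dp->mst_mgr);
	/* source-specific: kick off the ACT sequence here */
	if (drm_dp_check_act_status(&dp->mst_mgr) < 0)
		DRM_DEBUG_KMS("ACT not handled, carrying on regardless\n");
	drm_dp_update_payload_part2(&dp->mst_mgr);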
2365
2366/**
2367 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2368 * @clock: dot clock for the mode
2369 * @bpp: bpp for the mode.
2370 *
2371 * This uses the formula in the spec to calculate the PBN value for a mode.
2372 */
2373int drm_dp_calc_pbn_mode(int clock, int bpp)
2374{
2375 fixed20_12 pix_bw;
2376 fixed20_12 fbpp;
2377 fixed20_12 result;
2378 fixed20_12 margin, tmp;
2379 u32 res;
2380
2381 pix_bw.full = dfixed_const(clock);
2382 fbpp.full = dfixed_const(bpp);
2383 tmp.full = dfixed_const(8);
2384 fbpp.full = dfixed_div(fbpp, tmp);
2385
2386 result.full = dfixed_mul(pix_bw, fbpp);
2387 margin.full = dfixed_const(54);
2388 tmp.full = dfixed_const(64);
2389 margin.full = dfixed_div(margin, tmp);
2390 result.full = dfixed_div(result, margin);
2391
2392 margin.full = dfixed_const(1006);
2393 tmp.full = dfixed_const(1000);
2394 margin.full = dfixed_div(margin, tmp);
2395 result.full = dfixed_mul(result, margin);
2396
2397 result.full = dfixed_div(result, tmp);
2398 result.full = dfixed_ceil(result);
2399 res = dfixed_trunc(result);
2400 return res;
2401}
2402EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
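Unrolled, the fixed-point sequence above computes PBN = ceil(clock * (bpp / 8) * (64 / 54) * (1006 / 1000) / 1000), with clock in kHz; the 54/64 factor and the 1006/1000 multiplier mirror the PBN formula and 0.6% margin from the DP 1.2 specification. For the first self-test case below, 154000 kHz at 30 bpp: 154000 * 3.75 = 577500, * 64/54 ~= 684444.4, * 1.006 ~= 688551.1, / 1000 = 688.55, which rounds up to the expected 689.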
2403
2404static int test_calc_pbn_mode(void)
2405{
2406 int ret;
2407 ret = drm_dp_calc_pbn_mode(154000, 30);
2408 if (ret != 689)
2409 return -EINVAL;
2410 ret = drm_dp_calc_pbn_mode(234000, 30);
2411 if (ret != 1047)
2412 return -EINVAL;
2413 return 0;
2414}
2415
2416/* we want to kick the TX after we've acked the up/down IRQs. */
2417static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
2418{
2419 queue_work(system_long_wq, &mgr->tx_work);
2420}
2421
2422static void drm_dp_mst_dump_mstb(struct seq_file *m,
2423 struct drm_dp_mst_branch *mstb)
2424{
2425 struct drm_dp_mst_port *port;
2426 int tabs = mstb->lct;
2427 char prefix[10];
2428 int i;
2429
 /* mstb->lct can be as high as 15; clamp so the terminator stays inside prefix[10] */
 if (tabs > 9)
 tabs = 9;
2430 for (i = 0; i < tabs; i++)
2431 prefix[i] = '\t';
2432 prefix[i] = '\0';
2433
2434 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
2435 list_for_each_entry(port, &mstb->ports, next) {
2436 seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
2437 if (port->mstb)
2438 drm_dp_mst_dump_mstb(m, port->mstb);
2439 }
2440}
2441
2442static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
2443 char *buf)
2444{
2445 int ret;
2446 int i;
2447 for (i = 0; i < 4; i++) {
2448 ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
2449 if (ret != 16)
2450 break;
2451 }
2452 return i == 4;
2455}
2456
2457/**
2458 * drm_dp_mst_dump_topology(): dump topology to seq file.
2459 * @m: seq_file to dump output to
2460 * @mgr: manager to dump current topology for.
2461 *
2462 * Helper to dump the current MST topology state to a seq file for debugfs.
2463 */
2464void drm_dp_mst_dump_topology(struct seq_file *m,
2465 struct drm_dp_mst_topology_mgr *mgr)
2466{
2467 int i;
2468 struct drm_dp_mst_port *port;
2469 mutex_lock(&mgr->lock);
2470 if (mgr->mst_primary)
2471 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
2472
2473 /* dump VCPIs */
2474 mutex_unlock(&mgr->lock);
2475
2476 mutex_lock(&mgr->payload_lock);
2477 seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);
2478
2479 for (i = 0; i < mgr->max_payloads; i++) {
2480 if (mgr->proposed_vcpis[i]) {
2481 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2482 seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
2483 } else
2484 seq_printf(m, "vcpi %d:unsed\n", i);
2485 }
2486 for (i = 0; i < mgr->max_payloads; i++) {
2487 seq_printf(m, "payload %d: %d, %d, %d\n",
2488 i,
2489 mgr->payloads[i].payload_state,
2490 mgr->payloads[i].start_slot,
2491 mgr->payloads[i].num_slots);
2492
2493
2494 }
2495 mutex_unlock(&mgr->payload_lock);
2496
2497 mutex_lock(&mgr->lock);
2498 if (mgr->mst_primary) {
2499 u8 buf[64];
2500 bool bret;
2501 int ret;
2502 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
2503 seq_printf(m, "dpcd: ");
2504 for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
2505 seq_printf(m, "%02x ", buf[i]);
2506 seq_printf(m, "\n");
2507 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
2508 seq_printf(m, "faux/mst: ");
2509 for (i = 0; i < 2; i++)
2510 seq_printf(m, "%02x ", buf[i]);
2511 seq_printf(m, "\n");
2512 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
2513 seq_printf(m, "mst ctrl: ");
2514 for (i = 0; i < 1; i++)
2515 seq_printf(m, "%02x ", buf[i]);
2516 seq_printf(m, "\n");
2517
2518 bret = dump_dp_payload_table(mgr, buf);
2519 if (bret) {
2520 seq_printf(m, "payload table: ");
2521 for (i = 0; i < 63; i++)
2522 seq_printf(m, "%02x ", buf[i]);
2523 seq_printf(m, "\n");
2524 }
2525
2526 }
2527
2528 mutex_unlock(&mgr->lock);
2529
2530}
2531EXPORT_SYMBOL(drm_dp_mst_dump_topology);
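Hooking this up in debugfs takes one show function; a sketch (how the node gets created and how m->private is wired up is driver specific):

	static int example_mst_topology_show(struct seq_file *m, void *data)
	{
		struct example_dp *dp = m->private;	/* assumed wiring */

		drm_dp_mst_dump_topology(m, &dp->mst_mgr);
		return 0;
	}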
2532
2533static void drm_dp_tx_work(struct work_struct *work)
2534{
2535 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
2536
2537 mutex_lock(&mgr->qlock);
2538 if (mgr->tx_down_in_progress)
2539 process_single_down_tx_qlock(mgr);
2540 mutex_unlock(&mgr->qlock);
2541}
2542
2543/**
2544 * drm_dp_mst_topology_mgr_init - initialise a topology manager
2545 * @mgr: manager struct to initialise
2546 * @dev: device providing this structure - for i2c addition.
2547 * @aux: DP helper aux channel to talk to this device
2548 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
2549 * @max_payloads: maximum number of payloads this GPU can source
2550 * @conn_base_id: the connector object ID the MST device is connected to.
2551 *
2552 * Return 0 for success, or negative error code on failure
2553 */
2554int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2555 struct device *dev, struct drm_dp_aux *aux,
2556 int max_dpcd_transaction_bytes,
2557 int max_payloads, int conn_base_id)
2558{
2559 mutex_init(&mgr->lock);
2560 mutex_init(&mgr->qlock);
2561 mutex_init(&mgr->payload_lock);
2562 INIT_LIST_HEAD(&mgr->tx_msg_upq);
2563 INIT_LIST_HEAD(&mgr->tx_msg_downq);
2564 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2565 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
2566 init_waitqueue_head(&mgr->tx_waitq);
2567 mgr->dev = dev;
2568 mgr->aux = aux;
2569 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2570 mgr->max_payloads = max_payloads;
2571 mgr->conn_base_id = conn_base_id;
2572 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2573 if (!mgr->payloads)
2574 return -ENOMEM;
2575 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
2576 if (!mgr->proposed_vcpis) {
 /* don't leak the payloads array if the second allocation fails */
 kfree(mgr->payloads);
 mgr->payloads = NULL;
2577 return -ENOMEM;
 }
2578 set_bit(0, &mgr->payload_mask);
2579 WARN_ON(test_calc_pbn_mode());
2580 return 0;
2581}
2582EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
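A driver initialises the manager once, after pointing mgr->cbs at its callback table. A minimal sketch (the callback implementations are assumed, and the 16-byte/3-payload numbers are modelled on the in-tree users, not mandated here):

	static struct drm_dp_mst_topology_cbs example_mst_cbs = {
		.add_connector = example_mst_add_connector,	/* assumed */
		.hotplug = example_mst_hotplug,			/* assumed */
	};

	dp->mst_mgr.cbs = &example_mst_cbs;
	ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev->dev, &dp->aux,
					   16, 3, connector->base.id);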
2583
2584/**
2585 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
2586 * @mgr: manager to destroy
2587 */
2588void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2589{
2590 mutex_lock(&mgr->payload_lock);
2591 kfree(mgr->payloads);
2592 mgr->payloads = NULL;
2593 kfree(mgr->proposed_vcpis);
2594 mgr->proposed_vcpis = NULL;
2595 mutex_unlock(&mgr->payload_lock);
2596 mgr->dev = NULL;
2597 mgr->aux = NULL;
2598}
2599EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
2600
2601/* I2C device */
2602static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
2603 int num)
2604{
2605 struct drm_dp_aux *aux = adapter->algo_data;
2606 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
2607 struct drm_dp_mst_branch *mstb;
2608 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2609 unsigned int i;
2610 bool reading = false;
2611 struct drm_dp_sideband_msg_req_body msg;
2612 struct drm_dp_sideband_msg_tx *txmsg = NULL;
2613 int ret;
2614
2615 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
2616 if (!mstb)
2617 return -EREMOTEIO;
2618
2619 /* construct i2c msg */
2620 /* see if last msg is a read */
2621 if (msgs[num - 1].flags & I2C_M_RD)
2622 reading = true;
2623
2624 if (!reading) {
2625 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
2626 ret = -EIO;
2627 goto out;
2628 }
2629
2630 msg.req_type = DP_REMOTE_I2C_READ;
2631 msg.u.i2c_read.num_transactions = num - 1;
2632 msg.u.i2c_read.port_number = port->port_num;
2633 for (i = 0; i < num - 1; i++) {
2634 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
2635 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
2636 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
2637 }
2638 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
2639 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
2640
2641 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2642 if (!txmsg) {
2643 ret = -ENOMEM;
2644 goto out;
2645 }
2646
2647 txmsg->dst = mstb;
2648 drm_dp_encode_sideband_req(&msg, txmsg);
2649
2650 drm_dp_queue_down_tx(mgr, txmsg);
2651
2652 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2653 if (ret > 0) {
2654
2655 if (txmsg->reply.reply_type == 1) { /* got a NAK back */
2656 ret = -EREMOTEIO;
2657 goto out;
2658 }
2659 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
2660 ret = -EIO;
2661 goto out;
2662 }
2663 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
2664 ret = num;
2665 }
2666out:
2667 kfree(txmsg);
2668 drm_dp_put_mst_branch_device(mstb);
2669 return ret;
2670}
2671
2672static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
2673{
2674 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
2675 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
2676 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
2677 I2C_FUNC_10BIT_ADDR;
2678}
2679
2680static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
2681 .functionality = drm_dp_mst_i2c_functionality,
2682 .master_xfer = drm_dp_mst_i2c_xfer,
2683};
2684
2685/**
2686 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
2687 * @aux: DisplayPort AUX channel
2688 *
2689 * Returns 0 on success or a negative error code on failure.
2690 */
2691static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
2692{
2693 aux->ddc.algo = &drm_dp_mst_i2c_algo;
2694 aux->ddc.algo_data = aux;
2695 aux->ddc.retries = 3;
2696
2697 aux->ddc.class = I2C_CLASS_DDC;
2698 aux->ddc.owner = THIS_MODULE;
2699 aux->ddc.dev.parent = aux->dev;
2700 aux->ddc.dev.of_node = aux->dev->of_node;
2701
2702 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
2703 sizeof(aux->ddc.name));
2704
2705 return i2c_add_adapter(&aux->ddc);
2706}
2707
2708/**
2709 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
2710 * @aux: DisplayPort AUX channel
2711 */
2712static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
2713{
2714 i2c_del_adapter(&aux->ddc);
2715}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8218078b6133..3242e208c0d0 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -1,31 +1,11 @@
-/**
- * \file drm_drv.c
- * Generic driver template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * To use this template, you must at least define the following (samples
- * given for the MGA driver):
- *
- * \code
- * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
- *
- * #define DRIVER_NAME "mga"
- * #define DRIVER_DESC "Matrox G200/G400"
- * #define DRIVER_DATE "20001127"
- *
- * #define drm_x mga_##x
- * \endcode
- */
-
 /*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
  *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  *
+ * Author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
@@ -40,432 +20,906 @@
40 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
43 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
44 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
45 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
46 * OTHER DEALINGS IN THE SOFTWARE. 26 * DEALINGS IN THE SOFTWARE.
47 */ 27 */
48 28
49#include <linux/debugfs.h> 29#include <linux/debugfs.h>
30#include <linux/fs.h>
31#include <linux/module.h>
32#include <linux/moduleparam.h>
33#include <linux/mount.h>
50#include <linux/slab.h> 34#include <linux/slab.h>
51#include <linux/export.h>
52#include <drm/drmP.h> 35#include <drm/drmP.h>
53#include <drm/drm_core.h> 36#include <drm/drm_core.h>
37#include "drm_legacy.h"
54 38
39unsigned int drm_debug = 0; /* 1 to enable debug output */
40EXPORT_SYMBOL(drm_debug);
55 41
56static int drm_version(struct drm_device *dev, void *data, 42unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
57 struct drm_file *file_priv);
58
59#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
60 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
61
62/** Ioctl table */
63static const struct drm_ioctl_desc drm_ioctls[] = {
64 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
65 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
66 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
67 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
68 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
69 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
70 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
71 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
72 DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
73 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
74
75 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
76 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
77 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
78 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
79
80 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
81 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
82
83 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
84 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
85
86 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
87 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
88
89 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
90 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
91 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
92 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
93 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
94 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
95 DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
96
97 DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
98 DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
99
100 DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
101 DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
102
103 DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
104
105 DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
106 DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
107 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
108 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
109 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
110 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
111
112 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
113
114#if __OS_HAS_AGP
115 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
116 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
117 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
118 DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
119 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
120 DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
121 DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
122 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
123#endif
124
125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
127
128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
129
130 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
131
132 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
133
134 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
135 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
136 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
137
138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
139
140 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
141 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
142
143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
145 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
146 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
147 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
148 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
149 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
150 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
151 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
152 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
153 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
154 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
155 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
156 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
157 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
158 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
159 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
160 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
161 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
162 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
163 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
164 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
165 DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
166 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
167 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
168 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
169 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
170};
171 43
172#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 44unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
173 45
174/** File operations structure */ 46/*
175static const struct file_operations drm_stub_fops = { 47 * Default to use monotonic timestamps for wait-for-vblank and page-flip
176 .owner = THIS_MODULE, 48 * complete events.
177 .open = drm_stub_open, 49 */
178 .llseek = noop_llseek, 50unsigned int drm_timestamp_monotonic = 1;
179};
180 51
181static int __init drm_core_init(void) 52MODULE_AUTHOR(CORE_AUTHOR);
53MODULE_DESCRIPTION(CORE_DESC);
54MODULE_LICENSE("GPL and additional rights");
55MODULE_PARM_DESC(debug, "Enable debug output");
56MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
57MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
58MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
59
60module_param_named(debug, drm_debug, int, 0600);
61module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
62module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
63module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
64
65static DEFINE_SPINLOCK(drm_minor_lock);
66static struct idr drm_minors_idr;
67
68struct class *drm_class;
69static struct dentry *drm_debugfs_root;
70
71int drm_err(const char *func, const char *format, ...)
182{ 72{
183 int ret = -ENOMEM; 73 struct va_format vaf;
74 va_list args;
75 int r;
184 76
185 drm_global_init(); 77 va_start(args, format);
186 drm_connector_ida_init();
187 idr_init(&drm_minors_idr);
188 78
189 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) 79 vaf.fmt = format;
190 goto err_p1; 80 vaf.va = &args;
191 81
192 drm_class = drm_sysfs_create(THIS_MODULE, "drm"); 82 r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
193 if (IS_ERR(drm_class)) { 83
194 printk(KERN_ERR "DRM: Error creating drm class.\n"); 84 va_end(args);
195 ret = PTR_ERR(drm_class); 85
196 goto err_p2; 86 return r;
87}
88EXPORT_SYMBOL(drm_err);
89
90void drm_ut_debug_printk(const char *function_name, const char *format, ...)
91{
92 struct va_format vaf;
93 va_list args;
94
95 va_start(args, format);
96 vaf.fmt = format;
97 vaf.va = &args;
98
99 printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
100
101 va_end(args);
102}
103EXPORT_SYMBOL(drm_ut_debug_printk);
104
105struct drm_master *drm_master_create(struct drm_minor *minor)
106{
107 struct drm_master *master;
108
109 master = kzalloc(sizeof(*master), GFP_KERNEL);
110 if (!master)
111 return NULL;
112
113 kref_init(&master->refcount);
114 spin_lock_init(&master->lock.spinlock);
115 init_waitqueue_head(&master->lock.lock_queue);
116 if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
117 kfree(master);
118 return NULL;
197 } 119 }
120 INIT_LIST_HEAD(&master->magicfree);
121 master->minor = minor;
198 122
199 drm_debugfs_root = debugfs_create_dir("dri", NULL); 123 return master;
200 if (!drm_debugfs_root) { 124}
201 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); 125
202 ret = -1; 126struct drm_master *drm_master_get(struct drm_master *master)
203 goto err_p3; 127{
128 kref_get(&master->refcount);
129 return master;
130}
131EXPORT_SYMBOL(drm_master_get);
132
133static void drm_master_destroy(struct kref *kref)
134{
135 struct drm_master *master = container_of(kref, struct drm_master, refcount);
136 struct drm_magic_entry *pt, *next;
137 struct drm_device *dev = master->minor->dev;
138 struct drm_map_list *r_list, *list_temp;
139
140 mutex_lock(&dev->struct_mutex);
141 if (dev->driver->master_destroy)
142 dev->driver->master_destroy(dev, master);
143
144 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
145 if (r_list->master == master) {
146 drm_rmmap_locked(dev, r_list->map);
147 r_list = NULL;
148 }
204 } 149 }
205 150
206 DRM_INFO("Initialized %s %d.%d.%d %s\n", 151 if (master->unique) {
207 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); 152 kfree(master->unique);
208 return 0; 153 master->unique = NULL;
209err_p3: 154 master->unique_len = 0;
210 drm_sysfs_destroy(); 155 }
211err_p2:
212 unregister_chrdev(DRM_MAJOR, "drm");
213 156
214 idr_destroy(&drm_minors_idr); 157 list_for_each_entry_safe(pt, next, &master->magicfree, head) {
215err_p1: 158 list_del(&pt->head);
216 return ret; 159 drm_ht_remove_item(&master->magiclist, &pt->hash_item);
160 kfree(pt);
161 }
162
163 drm_ht_remove(&master->magiclist);
164
165 mutex_unlock(&dev->struct_mutex);
166 kfree(master);
217} 167}
218 168
219static void __exit drm_core_exit(void) 169void drm_master_put(struct drm_master **master)
220{ 170{
221 debugfs_remove(drm_debugfs_root); 171 kref_put(&(*master)->refcount, drm_master_destroy);
222 drm_sysfs_destroy(); 172 *master = NULL;
173}
174EXPORT_SYMBOL(drm_master_put);
223 175
224 unregister_chrdev(DRM_MAJOR, "drm"); 176int drm_setmaster_ioctl(struct drm_device *dev, void *data,
177 struct drm_file *file_priv)
178{
179 int ret = 0;
225 180
226 drm_connector_ida_destroy(); 181 mutex_lock(&dev->master_mutex);
227 idr_destroy(&drm_minors_idr); 182 if (file_priv->is_master)
183 goto out_unlock;
184
185 if (file_priv->minor->master) {
186 ret = -EINVAL;
187 goto out_unlock;
188 }
189
190 if (!file_priv->master) {
191 ret = -EINVAL;
192 goto out_unlock;
193 }
194
195 file_priv->minor->master = drm_master_get(file_priv->master);
196 file_priv->is_master = 1;
197 if (dev->driver->master_set) {
198 ret = dev->driver->master_set(dev, file_priv, false);
199 if (unlikely(ret != 0)) {
200 file_priv->is_master = 0;
201 drm_master_put(&file_priv->minor->master);
202 }
203 }
204
205out_unlock:
206 mutex_unlock(&dev->master_mutex);
207 return ret;
228} 208}
229 209
230module_init(drm_core_init); 210int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
231module_exit(drm_core_exit); 211 struct drm_file *file_priv)
212{
213 int ret = -EINVAL;
232 214
233/** 215 mutex_lock(&dev->master_mutex);
234 * Copy and IOCTL return string to user space 216 if (!file_priv->is_master)
217 goto out_unlock;
218
219 if (!file_priv->minor->master)
220 goto out_unlock;
221
222 ret = 0;
223 if (dev->driver->master_drop)
224 dev->driver->master_drop(dev, file_priv, false);
225 drm_master_put(&file_priv->minor->master);
226 file_priv->is_master = 0;
227
228out_unlock:
229 mutex_unlock(&dev->master_mutex);
230 return ret;
231}
232
233/*
234 * DRM Minors
235 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
236 * of them is represented by a drm_minor object. Depending on the capabilities
237 * of the device-driver, different interfaces are registered.
238 *
239 * Minors can be accessed via dev->$minor_name. This pointer is either
240 * NULL or a valid drm_minor pointer and stays valid as long as the device is
241 * valid. This means, DRM minors have the same life-time as the underlying
242 * device. However, this doesn't mean that the minor is active. Minors are
243 * registered and unregistered dynamically according to device-state.
235 */ 244 */
236static int drm_copy_field(char *buf, size_t *buf_len, const char *value) 245
246static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
247 unsigned int type)
237{ 248{
238 int len; 249 switch (type) {
250 case DRM_MINOR_LEGACY:
251 return &dev->primary;
252 case DRM_MINOR_RENDER:
253 return &dev->render;
254 case DRM_MINOR_CONTROL:
255 return &dev->control;
256 default:
257 return NULL;
258 }
259}
239 260
240 /* don't overflow userbuf */ 261static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
241 len = strlen(value); 262{
242 if (len > *buf_len) 263 struct drm_minor *minor;
243 len = *buf_len; 264 unsigned long flags;
265 int r;
266
267 minor = kzalloc(sizeof(*minor), GFP_KERNEL);
268 if (!minor)
269 return -ENOMEM;
270
271 minor->type = type;
272 minor->dev = dev;
273
274 idr_preload(GFP_KERNEL);
275 spin_lock_irqsave(&drm_minor_lock, flags);
276 r = idr_alloc(&drm_minors_idr,
277 NULL,
278 64 * type,
279 64 * (type + 1),
280 GFP_NOWAIT);
281 spin_unlock_irqrestore(&drm_minor_lock, flags);
282 idr_preload_end();
283
284 if (r < 0)
285 goto err_free;
286
287 minor->index = r;
288
289 minor->kdev = drm_sysfs_minor_alloc(minor);
290 if (IS_ERR(minor->kdev)) {
291 r = PTR_ERR(minor->kdev);
292 goto err_index;
293 }
244 294
245 /* let userspace know exact length of driver value (which could be 295 *drm_minor_get_slot(dev, type) = minor;
246 * larger than the userspace-supplied buffer) */ 296 return 0;
247 *buf_len = strlen(value); 297
298err_index:
299 spin_lock_irqsave(&drm_minor_lock, flags);
300 idr_remove(&drm_minors_idr, minor->index);
301 spin_unlock_irqrestore(&drm_minor_lock, flags);
302err_free:
303 kfree(minor);
304 return r;
305}
306
307static void drm_minor_free(struct drm_device *dev, unsigned int type)
308{
309 struct drm_minor **slot, *minor;
310 unsigned long flags;
311
312 slot = drm_minor_get_slot(dev, type);
313 minor = *slot;
314 if (!minor)
315 return;
316
317 drm_mode_group_destroy(&minor->mode_group);
318 put_device(minor->kdev);
319
320 spin_lock_irqsave(&drm_minor_lock, flags);
321 idr_remove(&drm_minors_idr, minor->index);
322 spin_unlock_irqrestore(&drm_minor_lock, flags);
323
324 kfree(minor);
325 *slot = NULL;
326}
248 327
249 /* finally, try filling in the userbuf */ 328static int drm_minor_register(struct drm_device *dev, unsigned int type)
250 if (len && buf) 329{
251 if (copy_to_user(buf, value, len)) 330 struct drm_minor *minor;
252 return -EFAULT; 331 unsigned long flags;
332 int ret;
333
334 DRM_DEBUG("\n");
335
336 minor = *drm_minor_get_slot(dev, type);
337 if (!minor)
338 return 0;
339
340 ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
341 if (ret) {
342 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
343 return ret;
344 }
345
346 ret = device_add(minor->kdev);
347 if (ret)
348 goto err_debugfs;
349
350 /* replace NULL with @minor so lookups will succeed from now on */
351 spin_lock_irqsave(&drm_minor_lock, flags);
352 idr_replace(&drm_minors_idr, minor, minor->index);
353 spin_unlock_irqrestore(&drm_minor_lock, flags);
354
355 DRM_DEBUG("new minor registered %d\n", minor->index);
253 return 0; 356 return 0;
357
358err_debugfs:
359 drm_debugfs_cleanup(minor);
360 return ret;
361}
362
363static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
364{
365 struct drm_minor *minor;
366 unsigned long flags;
367
368 minor = *drm_minor_get_slot(dev, type);
369 if (!minor || !device_is_registered(minor->kdev))
370 return;
371
372 /* replace @minor with NULL so lookups will fail from now on */
373 spin_lock_irqsave(&drm_minor_lock, flags);
374 idr_replace(&drm_minors_idr, NULL, minor->index);
375 spin_unlock_irqrestore(&drm_minor_lock, flags);
376
377 device_del(minor->kdev);
378 dev_set_drvdata(minor->kdev, NULL); /* safety belt */
379 drm_debugfs_cleanup(minor);
254} 380}
255 381
256/** 382/**
257 * Get version information 383 * drm_minor_acquire - Acquire a DRM minor
384 * @minor_id: Minor ID of the DRM-minor
385 *
386 * Looks up the given minor-ID and returns the respective DRM-minor object. The
387 * refence-count of the underlying device is increased so you must release this
388 * object with drm_minor_release().
258 * 389 *
259 * \param inode device inode. 390 * As long as you hold this minor, it is guaranteed that the object and the
260 * \param filp file pointer. 391 * minor->dev pointer will stay valid! However, the device may get unplugged and
261 * \param cmd command. 392 * unregistered while you hold the minor.
262 * \param arg user argument, pointing to a drm_version structure.
263 * \return zero on success or negative number on failure.
264 * 393 *
265 * Fills in the version information in \p arg. 394 * Returns:
395 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
396 * failure.
266 */ 397 */
267static int drm_version(struct drm_device *dev, void *data, 398struct drm_minor *drm_minor_acquire(unsigned int minor_id)
268 struct drm_file *file_priv)
269{ 399{
270 struct drm_version *version = data; 400 struct drm_minor *minor;
271 int err; 401 unsigned long flags;
402
403 spin_lock_irqsave(&drm_minor_lock, flags);
404 minor = idr_find(&drm_minors_idr, minor_id);
405 if (minor)
406 drm_dev_ref(minor->dev);
407 spin_unlock_irqrestore(&drm_minor_lock, flags);
408
409 if (!minor) {
410 return ERR_PTR(-ENODEV);
411 } else if (drm_device_is_unplugged(minor->dev)) {
412 drm_dev_unref(minor->dev);
413 return ERR_PTR(-ENODEV);
414 }
272 415
273 version->version_major = dev->driver->major; 416 return minor;
274 version->version_minor = dev->driver->minor; 417}
275 version->version_patchlevel = dev->driver->patchlevel;
276 err = drm_copy_field(version->name, &version->name_len,
277 dev->driver->name);
278 if (!err)
279 err = drm_copy_field(version->date, &version->date_len,
280 dev->driver->date);
281 if (!err)
282 err = drm_copy_field(version->desc, &version->desc_len,
283 dev->driver->desc);
284 418
285 return err; 419/**
420 * drm_minor_release - Release DRM minor
421 * @minor: Pointer to DRM minor object
422 *
423 * Release a minor that was previously acquired via drm_minor_acquire().
424 */
425void drm_minor_release(struct drm_minor *minor)
426{
427 drm_dev_unref(minor->dev);
286} 428}
287 429
288/** 430/**
289 * drm_ioctl_permit - Check ioctl permissions against caller 431 * drm_put_dev - Unregister and release a DRM device
432 * @dev: DRM device
433 *
434 * Called at module unload time or when a PCI device is unplugged.
290 * 435 *
291 * @flags: ioctl permission flags. 436 * Use of this function is discouraged. It will eventually go away completely.
292 * @file_priv: Pointer to struct drm_file identifying the caller. 437 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
293 * 438 *
294 * Checks whether the caller is allowed to run an ioctl with the 439 * Cleans up all DRM device, calling drm_lastclose().
295 * indicated permissions. If so, returns zero. Otherwise returns an
296 * error code suitable for ioctl return.
297 */ 440 */
298static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) 441void drm_put_dev(struct drm_device *dev)
299{ 442{
300 /* ROOT_ONLY is only for CAP_SYS_ADMIN */ 443 DRM_DEBUG("\n");
301 if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
302 return -EACCES;
303
304 /* AUTH is only for authenticated or render client */
305 if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
306 !file_priv->authenticated))
307 return -EACCES;
308
309 /* MASTER is only for master or control clients */
310 if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
311 !drm_is_control_client(file_priv)))
312 return -EACCES;
313
314 /* Control clients must be explicitly allowed */
315 if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
316 drm_is_control_client(file_priv)))
317 return -EACCES;
318
319 /* Render clients must be explicitly allowed */
320 if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
321 drm_is_render_client(file_priv)))
322 return -EACCES;
323 444
324 return 0; 445 if (!dev) {
446 DRM_ERROR("cleanup called no dev\n");
447 return;
448 }
449
450 drm_dev_unregister(dev);
451 drm_dev_unref(dev);
452}
453EXPORT_SYMBOL(drm_put_dev);
454
455void drm_unplug_dev(struct drm_device *dev)
456{
457 /* for a USB device */
458 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
459 drm_minor_unregister(dev, DRM_MINOR_RENDER);
460 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
461
462 mutex_lock(&drm_global_mutex);
463
464 drm_device_set_unplugged(dev);
465
466 if (dev->open_count == 0) {
467 drm_put_dev(dev);
468 }
469 mutex_unlock(&drm_global_mutex);
470}
471EXPORT_SYMBOL(drm_unplug_dev);
472
473/*
474 * DRM internal mount
475 * We want to be able to allocate our own "struct address_space" to control
476 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
477 * stand-alone address_space objects, so we need an underlying inode. As there
478 * is no way to allocate an independent inode easily, we need a fake internal
479 * VFS mount-point.
480 *
481 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
482 * frees it again. You are allowed to use iget() and iput() to get references to
483 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
484 * drm_fs_inode_free() call (which does not have to be the last iput()).
485 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
486 * between multiple inode-users. You could, technically, call
487 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
488 * iput(), but this way you'd end up with a new vfsmount for each inode.
489 */
490
491static int drm_fs_cnt;
492static struct vfsmount *drm_fs_mnt;
493
494static const struct dentry_operations drm_fs_dops = {
495 .d_dname = simple_dname,
496};
497
498static const struct super_operations drm_fs_sops = {
499 .statfs = simple_statfs,
500};
501
502static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
503 const char *dev_name, void *data)
504{
505 return mount_pseudo(fs_type,
506 "drm:",
507 &drm_fs_sops,
508 &drm_fs_dops,
509 0x010203ff);
510}
511
512static struct file_system_type drm_fs_type = {
513 .name = "drm",
514 .owner = THIS_MODULE,
515 .mount = drm_fs_mount,
516 .kill_sb = kill_anon_super,
517};
518
519static struct inode *drm_fs_inode_new(void)
520{
521 struct inode *inode;
522 int r;
523
524 r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
525 if (r < 0) {
526 DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
527 return ERR_PTR(r);
528 }
529
530 inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
531 if (IS_ERR(inode))
532 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
533
534 return inode;
535}
536
537static void drm_fs_inode_free(struct inode *inode)
538{
539 if (inode) {
540 iput(inode);
541 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
542 }
325} 543}
326 544
327/** 545/**
328 * Called whenever a process performs an ioctl on /dev/drm. 546 * drm_dev_alloc - Allocate new DRM device
547 * @driver: DRM driver to allocate device for
548 * @parent: Parent device object
329 * 549 *
330 * \param inode device inode. 550 * Allocate and initialize a new DRM device. No device registration is done.
331 * \param file_priv DRM file private. 551 * Call drm_dev_register() to advertice the device to user space and register it
332 * \param cmd command. 552 * with other core subsystems.
333 * \param arg user argument.
334 * \return zero on success or negative number on failure.
335 * 553 *
336 * Looks up the ioctl function in the ::ioctls table, checking for root 554 * The initial ref-count of the object is 1. Use drm_dev_ref() and
337 * previleges if so required, and dispatches to the respective function. 555 * drm_dev_unref() to take and drop further ref-counts.
556 *
557 * RETURNS:
558 * Pointer to new DRM device, or NULL if out of memory.
338 */ 559 */
339long drm_ioctl(struct file *filp, 560struct drm_device *drm_dev_alloc(struct drm_driver *driver,
340 unsigned int cmd, unsigned long arg) 561 struct device *parent)
341{ 562{
342 struct drm_file *file_priv = filp->private_data;
343 struct drm_device *dev; 563 struct drm_device *dev;
344 const struct drm_ioctl_desc *ioctl = NULL; 564 int ret;
345 drm_ioctl_t *func; 565
346 unsigned int nr = DRM_IOCTL_NR(cmd); 566 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
347 int retcode = -EINVAL; 567 if (!dev)
348 char stack_kdata[128]; 568 return NULL;
349 char *kdata = NULL; 569
350 unsigned int usize, asize; 570 kref_init(&dev->ref);
351 571 dev->dev = parent;
352 dev = file_priv->minor->dev; 572 dev->driver = driver;
353 573
354 if (drm_device_is_unplugged(dev)) 574 INIT_LIST_HEAD(&dev->filelist);
355 return -ENODEV; 575 INIT_LIST_HEAD(&dev->ctxlist);
356 576 INIT_LIST_HEAD(&dev->vmalist);
357 if ((nr >= DRM_CORE_IOCTL_COUNT) && 577 INIT_LIST_HEAD(&dev->maplist);
358 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) 578 INIT_LIST_HEAD(&dev->vblank_event_list);
359 goto err_i1; 579
360 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && 580 spin_lock_init(&dev->buf_lock);
361 (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 581 spin_lock_init(&dev->event_lock);
362 u32 drv_size; 582 mutex_init(&dev->struct_mutex);
363 ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; 583 mutex_init(&dev->ctxlist_mutex);
364 drv_size = _IOC_SIZE(ioctl->cmd_drv); 584 mutex_init(&dev->master_mutex);
365 usize = asize = _IOC_SIZE(cmd); 585
366 if (drv_size > asize) 586 dev->anon_inode = drm_fs_inode_new();
367 asize = drv_size; 587 if (IS_ERR(dev->anon_inode)) {
368 cmd = ioctl->cmd_drv; 588 ret = PTR_ERR(dev->anon_inode);
589 DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
590 goto err_free;
369 } 591 }
370 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
371 u32 drv_size;
372
373 ioctl = &drm_ioctls[nr];
374 592
375 drv_size = _IOC_SIZE(ioctl->cmd); 593 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
376 usize = asize = _IOC_SIZE(cmd); 594 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
377 if (drv_size > asize) 595 if (ret)
378 asize = drv_size; 596 goto err_minors;
597 }
379 598
380 cmd = ioctl->cmd; 599 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
381 } else 600 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
382 goto err_i1; 601 if (ret)
602 goto err_minors;
603 }
383 604
384 DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", 605 ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
385 task_pid_nr(current), 606 if (ret)
386 (long)old_encode_dev(file_priv->minor->kdev->devt), 607 goto err_minors;
387 file_priv->authenticated, ioctl->name);
388 608
389 /* Do not trust userspace, use our own definition */ 609 if (drm_ht_create(&dev->map_hash, 12))
390 func = ioctl->func; 610 goto err_minors;
391 611
392 if (unlikely(!func)) { 612 ret = drm_legacy_ctxbitmap_init(dev);
393 DRM_DEBUG("no function\n"); 613 if (ret) {
394 retcode = -EINVAL; 614 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
395 goto err_i1; 615 goto err_ht;
396 } 616 }
397 617
398 retcode = drm_ioctl_permit(ioctl->flags, file_priv); 618 if (driver->driver_features & DRIVER_GEM) {
399 if (unlikely(retcode)) 619 ret = drm_gem_init(dev);
400 goto err_i1; 620 if (ret) {
401 621 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
402 if (cmd & (IOC_IN | IOC_OUT)) { 622 goto err_ctxbitmap;
403 if (asize <= sizeof(stack_kdata)) {
404 kdata = stack_kdata;
405 } else {
406 kdata = kmalloc(asize, GFP_KERNEL);
407 if (!kdata) {
408 retcode = -ENOMEM;
409 goto err_i1;
410 }
411 } 623 }
412 if (asize > usize)
413 memset(kdata + usize, 0, asize - usize);
414 } 624 }
415 625
416 if (cmd & IOC_IN) { 626 return dev;
417 if (copy_from_user(kdata, (void __user *)arg, 627
418 usize) != 0) { 628err_ctxbitmap:
419 retcode = -EFAULT; 629 drm_legacy_ctxbitmap_cleanup(dev);
420 goto err_i1; 630err_ht:
421 } 631 drm_ht_remove(&dev->map_hash);
422 } else if (cmd & IOC_OUT) { 632err_minors:
423 memset(kdata, 0, usize); 633 drm_minor_free(dev, DRM_MINOR_LEGACY);
424 } 634 drm_minor_free(dev, DRM_MINOR_RENDER);
635 drm_minor_free(dev, DRM_MINOR_CONTROL);
636 drm_fs_inode_free(dev->anon_inode);
637err_free:
638 mutex_destroy(&dev->master_mutex);
639 kfree(dev);
640 return NULL;
641}
642EXPORT_SYMBOL(drm_dev_alloc);
643
644static void drm_dev_release(struct kref *ref)
645{
646 struct drm_device *dev = container_of(ref, struct drm_device, ref);
647
648 if (dev->driver->driver_features & DRIVER_GEM)
649 drm_gem_destroy(dev);
650
651 drm_legacy_ctxbitmap_cleanup(dev);
652 drm_ht_remove(&dev->map_hash);
653 drm_fs_inode_free(dev->anon_inode);
654
655 drm_minor_free(dev, DRM_MINOR_LEGACY);
656 drm_minor_free(dev, DRM_MINOR_RENDER);
657 drm_minor_free(dev, DRM_MINOR_CONTROL);
658
659 mutex_destroy(&dev->master_mutex);
660 kfree(dev->unique);
661 kfree(dev);
662}
425 663
426 if (ioctl->flags & DRM_UNLOCKED) 664/**
427 retcode = func(dev, kdata, file_priv); 665 * drm_dev_ref - Take reference of a DRM device
428 else { 666 * @dev: device to take reference of or NULL
429 mutex_lock(&drm_global_mutex); 667 *
430 retcode = func(dev, kdata, file_priv); 668 * This increases the ref-count of @dev by one. You *must* already own a
431 mutex_unlock(&drm_global_mutex); 669 * reference when calling this. Use drm_dev_unref() to drop this reference
670 * again.
671 *
672 * This function never fails. However, this function does not provide *any*
673 * guarantee whether the device is alive or running. It only provides a
674 * reference to the object and the memory associated with it.
675 */
676void drm_dev_ref(struct drm_device *dev)
677{
678 if (dev)
679 kref_get(&dev->ref);
680}
681EXPORT_SYMBOL(drm_dev_ref);
682
683/**
684 * drm_dev_unref - Drop reference of a DRM device
685 * @dev: device to drop reference of or NULL
686 *
687 * This decreases the ref-count of @dev by one. The device is destroyed if the
688 * ref-count drops to zero.
689 */
690void drm_dev_unref(struct drm_device *dev)
691{
692 if (dev)
693 kref_put(&dev->ref, drm_dev_release);
694}
695EXPORT_SYMBOL(drm_dev_unref);
696
697/**
698 * drm_dev_register - Register DRM device
699 * @dev: Device to register
700 * @flags: Flags passed to the driver's .load() function
701 *
702 * Register the DRM device @dev with the system, advertise device to user-space
703 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
704 * previously.
705 *
706 * Never call this twice on any device!
707 *
708 * RETURNS:
709 * 0 on success, negative error code on failure.
710 */
711int drm_dev_register(struct drm_device *dev, unsigned long flags)
712{
713 int ret;
714
715 mutex_lock(&drm_global_mutex);
716
717 ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
718 if (ret)
719 goto err_minors;
720
721 ret = drm_minor_register(dev, DRM_MINOR_RENDER);
722 if (ret)
723 goto err_minors;
724
725 ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
726 if (ret)
727 goto err_minors;
728
729 if (dev->driver->load) {
730 ret = dev->driver->load(dev, flags);
731 if (ret)
732 goto err_minors;
432 } 733 }
433 734
434 if (cmd & IOC_OUT) { 735 /* setup grouping for legacy outputs */
435 if (copy_to_user((void __user *)arg, kdata, 736 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
436 usize) != 0) 737 ret = drm_mode_group_init_legacy_group(dev,
437 retcode = -EFAULT; 738 &dev->primary->mode_group);
739 if (ret)
740 goto err_unload;
438 } 741 }
439 742
440 err_i1: 743 ret = 0;
441 if (!ioctl) 744 goto out_unlock;
442 DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", 745
443 task_pid_nr(current), 746err_unload:
444 (long)old_encode_dev(file_priv->minor->kdev->devt), 747 if (dev->driver->unload)
445 file_priv->authenticated, cmd, nr); 748 dev->driver->unload(dev);
446 749err_minors:
447 if (kdata != stack_kdata) 750 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
448 kfree(kdata); 751 drm_minor_unregister(dev, DRM_MINOR_RENDER);
449 if (retcode) 752 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
450 DRM_DEBUG("ret = %d\n", retcode); 753out_unlock:
451 return retcode; 754 mutex_unlock(&drm_global_mutex);
755 return ret;
452} 756}
453EXPORT_SYMBOL(drm_ioctl); 757EXPORT_SYMBOL(drm_dev_register);
454 758
455/** 759/**
456 * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags 760 * drm_dev_unregister - Unregister DRM device
761 * @dev: Device to unregister
762 *
763 * Unregister the DRM device from the system. This does the reverse of
764 * drm_dev_register() but does not deallocate the device. The caller must call
765 * drm_dev_unref() to drop their final reference.
766 */
767void drm_dev_unregister(struct drm_device *dev)
768{
769 struct drm_map_list *r_list, *list_temp;
770
771 drm_lastclose(dev);
772
773 if (dev->driver->unload)
774 dev->driver->unload(dev);
775
776 if (dev->agp)
777 drm_pci_agp_destroy(dev);
778
779 drm_vblank_cleanup(dev);
780
781 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
782 drm_rmmap(dev, r_list->map);
783
784 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
785 drm_minor_unregister(dev, DRM_MINOR_RENDER);
786 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
787}
788EXPORT_SYMBOL(drm_dev_unregister);
789
790/**
791 * drm_dev_set_unique - Set the unique name of a DRM device
792 * @dev: device of which to set the unique name
793 * @fmt: format string for unique name
794 *
795 * Sets the unique name of a DRM device using the specified format string and
796 * a variable list of arguments. Drivers can use this at driver probe time if
797 * the unique name of the devices they drive is static.
 798 *
 799 * Return: 0 on success or a negative error code on failure.
 800 */
 801int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
 802{
 803 va_list ap;
 804
 805 kfree(dev->unique);
 806
807 va_start(ap, fmt);
808 dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
809 va_end(ap);
810
811 return dev->unique ? 0 : -ENOMEM;
812}
813EXPORT_SYMBOL(drm_dev_set_unique);
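/*
 * Illustrative sketch, not part of this diff: a PCI driver setting a static
 * unique name at probe time, as the kerneldoc above suggests. ddev, pdev and
 * the err_free label come from a surrounding, hypothetical probe function.
 */
ret = drm_dev_set_unique(ddev, "pci:%s", pci_name(pdev));
if (ret)		/* -ENOMEM if kvasprintf() failed */
	goto err_free;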
814
815/*
816 * DRM Core
817 * The DRM core module initializes all global DRM objects and makes them
818 * available to drivers. Once setup, drivers can probe their respective
819 * devices.
820 * Currently, core management includes:
821 * - The "DRM-Global" key/value database
822 * - Global ID management for connectors
823 * - DRM major number allocation
824 * - DRM minor management
825 * - DRM sysfs class
826 * - DRM debugfs root
827 *
828 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
829 * interface registered on a DRM device, you can request minor numbers from DRM
830 * core. DRM core takes care of major-number management and char-dev
831 * registration. A stub ->open() callback forwards any open() requests to the
832 * registered minor.
833 */
834
835static int drm_stub_open(struct inode *inode, struct file *filp)
836{
837 const struct file_operations *new_fops;
838 struct drm_minor *minor;
839 int err;
840
841 DRM_DEBUG("\n");
842
843 mutex_lock(&drm_global_mutex);
844 minor = drm_minor_acquire(iminor(inode));
845 if (IS_ERR(minor)) {
846 err = PTR_ERR(minor);
847 goto out_unlock;
848 }
849
850 new_fops = fops_get(minor->dev->driver->fops);
851 if (!new_fops) {
852 err = -ENODEV;
853 goto out_release;
 854 }
 855
 856 replace_fops(filp, new_fops);
857 if (filp->f_op->open)
858 err = filp->f_op->open(inode, filp);
859 else
860 err = 0;
861
862out_release:
863 drm_minor_release(minor);
864out_unlock:
865 mutex_unlock(&drm_global_mutex);
866 return err;
 867}
 868
869static const struct file_operations drm_stub_fops = {
870 .owner = THIS_MODULE,
871 .open = drm_stub_open,
872 .llseek = noop_llseek,
873};
874
875static int __init drm_core_init(void)
876{
877 int ret = -ENOMEM;
878
879 drm_global_init();
880 drm_connector_ida_init();
881 idr_init(&drm_minors_idr);
882
883 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
884 goto err_p1;
885
886 drm_class = drm_sysfs_create(THIS_MODULE, "drm");
887 if (IS_ERR(drm_class)) {
888 printk(KERN_ERR "DRM: Error creating drm class.\n");
889 ret = PTR_ERR(drm_class);
890 goto err_p2;
891 }
892
893 drm_debugfs_root = debugfs_create_dir("dri", NULL);
894 if (!drm_debugfs_root) {
895 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
896 ret = -1;
897 goto err_p3;
898 }
899
900 DRM_INFO("Initialized %s %d.%d.%d %s\n",
901 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
902 return 0;
903err_p3:
904 drm_sysfs_destroy();
905err_p2:
906 unregister_chrdev(DRM_MAJOR, "drm");
907
908 idr_destroy(&drm_minors_idr);
909err_p1:
910 return ret;
911}
912
913static void __exit drm_core_exit(void)
914{
915 debugfs_remove(drm_debugfs_root);
916 drm_sysfs_destroy();
917
918 unregister_chrdev(DRM_MAJOR, "drm");
919
920 drm_connector_ida_destroy();
921 idr_destroy(&drm_minors_idr);
922}
923
924module_init(drm_core_init);
925module_exit(drm_core_exit);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index dfa9769b26b5..1dbf3bc4c6a3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3305,6 +3305,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
3305 struct drm_device *dev = encoder->dev; 3305 struct drm_device *dev = encoder->dev;
3306 3306
3307 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 3307 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
3308 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3308 3309
3309 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 3310 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
3310 if (connector->encoder == encoder && connector->eld[0]) 3311 if (connector->encoder == encoder && connector->eld[0])
@@ -3775,8 +3776,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3775 3776
3776 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; 3777 frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
3777 3778
3778 /* Populate picture aspect ratio from CEA mode list */ 3779 /*
3779 if (frame->video_code > 0) 3780 * Populate picture aspect ratio from either
3781 * user input (if specified) or from the CEA mode list.
3782 */
3783 if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
3784 mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
3785 frame->picture_aspect = mode->picture_aspect_ratio;
3786 else if (frame->video_code > 0)
3780 frame->picture_aspect = drm_get_cea_aspect_ratio( 3787 frame->picture_aspect = drm_get_cea_aspect_ratio(
3781 frame->video_code); 3788 frame->video_code);
3782 3789
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index f27c883be391..cc0ae047ed3b 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -327,7 +327,7 @@ err_drm_gem_cma_free_object:
327 return ret; 327 return ret;
328} 328}
329 329
330static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = { 330static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
331 .fb_probe = drm_fbdev_cma_create, 331 .fb_probe = drm_fbdev_cma_create,
332}; 332};
333 333
@@ -354,9 +354,10 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
354 return ERR_PTR(-ENOMEM); 354 return ERR_PTR(-ENOMEM);
355 } 355 }
356 356
357 fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
358 helper = &fbdev_cma->fb_helper; 357 helper = &fbdev_cma->fb_helper;
359 358
359 drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
360
360 ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count); 361 ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
361 if (ret < 0) { 362 if (ret < 0) {
362 dev_err(dev->dev, "Failed to initialize drm fb helper.\n"); 363 dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d5d8cea1a679..3144db9dc0f1 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -49,10 +49,11 @@ static LIST_HEAD(kernel_fb_helper_list);
49 * helper functions used by many drivers to implement the kernel mode setting 49 * helper functions used by many drivers to implement the kernel mode setting
50 * interfaces. 50 * interfaces.
51 * 51 *
52 * Initialization is done as a three-step process with drm_fb_helper_init(), 52 * Initialization is done as a four-step process with drm_fb_helper_prepare(),
53 * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config(). 53 * drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and
54 * Drivers with fancier requirements than the default behaviour can override the 54 * drm_fb_helper_initial_config(). Drivers with fancier requirements than the
55 * second step with their own code. Teardown is done with drm_fb_helper_fini(). 55 * default behaviour can override the third step with their own code.
56 * Teardown is done with drm_fb_helper_fini().
56 * 57 *
57 * At runtime drivers should restore the fbdev console by calling 58 * At runtime drivers should restore the fbdev console by calling
58 * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They 59 * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
@@ -63,6 +64,19 @@ static LIST_HEAD(kernel_fb_helper_list);
63 * 64 *
64 * All other functions exported by the fb helper library can be used to 65 * All other functions exported by the fb helper library can be used to
65 * implement the fbdev driver interface by the driver. 66 * implement the fbdev driver interface by the driver.
67 *
68 * It is possible, though perhaps somewhat tricky, to implement race-free
69 * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
70 * helper must be called first to initialize the minimum required to make
71 * hotplug detection work. Drivers also need to make sure to properly set up
72 * the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init()
73 * it is safe to enable interrupts and start processing hotplug events. At the
74 * same time, drivers should initialize all modeset objects such as CRTCs,
75 * encoders and connectors. To finish up the fbdev helper initialization, the
76 * drm_fb_helper_init() function is called. To probe for all attached displays
77 * and set up an initial configuration using the detected hardware, drivers
78 * should call drm_fb_helper_single_add_all_connectors() followed by
79 * drm_fb_helper_initial_config().
66 */ 80 */
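/*
 * Illustrative sketch, not part of this diff: the four-step sequence the
 * comment above describes. my_fbdev, my_fb_helper_funcs and the count
 * variables are stand-ins supplied by the driver.
 */
drm_fb_helper_prepare(dev, &my_fbdev->helper, &my_fb_helper_funcs);

/* ... create CRTCs/encoders/connectors, drm_kms_helper_poll_init() ... */

ret = drm_fb_helper_init(dev, &my_fbdev->helper, num_crtc, max_conn);
if (!ret) {
	drm_fb_helper_single_add_all_connectors(&my_fbdev->helper);
	drm_fb_helper_initial_config(&my_fbdev->helper, preferred_bpp);
}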
67 81
68/** 82/**
@@ -105,6 +119,58 @@ fail:
105} 119}
106EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); 120EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
107 121
122int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector)
123{
124 struct drm_fb_helper_connector **temp;
125 struct drm_fb_helper_connector *fb_helper_connector;
126
127 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
128 if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
129 temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL);
130 if (!temp)
131 return -ENOMEM;
132
133 fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1;
134 fb_helper->connector_info = temp;
135 }
136
137
138 fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
139 if (!fb_helper_connector)
140 return -ENOMEM;
141
142 fb_helper_connector->connector = connector;
143 fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
144 return 0;
145}
146EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
147
148int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
149 struct drm_connector *connector)
150{
151 struct drm_fb_helper_connector *fb_helper_connector;
152 int i, j;
153
154 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
155
156 for (i = 0; i < fb_helper->connector_count; i++) {
157 if (fb_helper->connector_info[i]->connector == connector)
158 break;
159 }
160
161 if (i == fb_helper->connector_count)
162 return -EINVAL;
163 fb_helper_connector = fb_helper->connector_info[i];
164
165 for (j = i + 1; j < fb_helper->connector_count; j++) {
166 fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
167 }
168 fb_helper->connector_count--;
169 kfree(fb_helper_connector);
170 return 0;
171}
172EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
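/*
 * Illustrative sketch, not part of this diff: keeping the fbdev helper's
 * connector list in sync from a driver's hotplug path. The WARN_ONs above
 * require mode_config.mutex to be held around both calls.
 */
mutex_lock(&dev->mode_config.mutex);
if (connected)
	ret = drm_fb_helper_add_one_connector(fb_helper, connector);
else
	ret = drm_fb_helper_remove_one_connector(fb_helper, connector);
mutex_unlock(&dev->mode_config.mutex);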
173
108static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) 174static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
109{ 175{
110 struct drm_fb_helper_connector *fb_helper_conn; 176 struct drm_fb_helper_connector *fb_helper_conn;
@@ -199,9 +265,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
199 struct drm_crtc_helper_funcs *funcs; 265 struct drm_crtc_helper_funcs *funcs;
200 int i; 266 int i;
201 267
202 if (list_empty(&kernel_fb_helper_list))
203 return false;
204
205 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 268 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
206 for (i = 0; i < helper->crtc_count; i++) { 269 for (i = 0; i < helper->crtc_count; i++) {
207 struct drm_mode_set *mode_set = 270 struct drm_mode_set *mode_set =
@@ -531,6 +594,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
531} 594}
532 595
533/** 596/**
597 * drm_fb_helper_prepare - setup a drm_fb_helper structure
598 * @dev: DRM device
599 * @helper: driver-allocated fbdev helper structure to set up
 600 * @funcs: pointer to structure of functions associated with this helper
601 *
602 * Sets up the bare minimum to make the framebuffer helper usable. This is
603 * useful to implement race-free initialization of the polling helpers.
604 */
605void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
606 const struct drm_fb_helper_funcs *funcs)
607{
608 INIT_LIST_HEAD(&helper->kernel_fb_list);
609 helper->funcs = funcs;
610 helper->dev = dev;
611}
612EXPORT_SYMBOL(drm_fb_helper_prepare);
613
614/**
534 * drm_fb_helper_init - initialize a drm_fb_helper structure 615 * drm_fb_helper_init - initialize a drm_fb_helper structure
535 * @dev: drm device 616 * @dev: drm device
536 * @fb_helper: driver-allocated fbdev helper structure to initialize 617 * @fb_helper: driver-allocated fbdev helper structure to initialize
@@ -542,8 +623,7 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
542 * nor register the fbdev. This is only done in drm_fb_helper_initial_config() 623 * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
 543 * to allow driver writers more control over the exact init sequence. 624 * to allow driver writers more control over the exact init sequence.
544 * 625 *
545 * Drivers must set fb_helper->funcs before calling 626 * Drivers must call drm_fb_helper_prepare() before calling this function.
546 * drm_fb_helper_initial_config().
547 * 627 *
548 * RETURNS: 628 * RETURNS:
549 * Zero if everything went ok, nonzero otherwise. 629 * Zero if everything went ok, nonzero otherwise.
@@ -558,10 +638,6 @@ int drm_fb_helper_init(struct drm_device *dev,
558 if (!max_conn_count) 638 if (!max_conn_count)
559 return -EINVAL; 639 return -EINVAL;
560 640
561 fb_helper->dev = dev;
562
563 INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
564
565 fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); 641 fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
566 if (!fb_helper->crtc_info) 642 if (!fb_helper->crtc_info)
567 return -ENOMEM; 643 return -ENOMEM;
@@ -572,6 +648,7 @@ int drm_fb_helper_init(struct drm_device *dev,
572 kfree(fb_helper->crtc_info); 648 kfree(fb_helper->crtc_info);
573 return -ENOMEM; 649 return -ENOMEM;
574 } 650 }
651 fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
575 fb_helper->connector_count = 0; 652 fb_helper->connector_count = 0;
576 653
577 for (i = 0; i < crtc_count; i++) { 654 for (i = 0; i < crtc_count; i++) {
@@ -1056,7 +1133,6 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
1056 info->fix.ypanstep = 1; /* doing it in hw */ 1133 info->fix.ypanstep = 1; /* doing it in hw */
1057 info->fix.ywrapstep = 0; 1134 info->fix.ywrapstep = 0;
1058 info->fix.accel = FB_ACCEL_NONE; 1135 info->fix.accel = FB_ACCEL_NONE;
1059 info->fix.type_aux = 0;
1060 1136
1061 info->fix.line_length = pitch; 1137 info->fix.line_length = pitch;
1062 return; 1138 return;
@@ -1613,8 +1689,10 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
1613 * either the output polling work or a work item launched from the driver's 1689 * either the output polling work or a work item launched from the driver's
1614 * hotplug interrupt). 1690 * hotplug interrupt).
1615 * 1691 *
1616 * Note that the driver must ensure that this is only called _after_ the fb has 1692 * Note that drivers may call this even before calling
1617 * been fully set up, i.e. after the call to drm_fb_helper_initial_config. 1693 * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows
1694 * for a race-free fbcon setup and will make sure that the fbdev emulation will
1695 * not miss any hotplug events.
1618 * 1696 *
1619 * RETURNS: 1697 * RETURNS:
1620 * 0 on success and a non-zero error code otherwise. 1698 * 0 on success and a non-zero error code otherwise.
@@ -1624,11 +1702,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1624 struct drm_device *dev = fb_helper->dev; 1702 struct drm_device *dev = fb_helper->dev;
1625 u32 max_width, max_height; 1703 u32 max_width, max_height;
1626 1704
1627 if (!fb_helper->fb)
1628 return 0;
1629
1630 mutex_lock(&fb_helper->dev->mode_config.mutex); 1705 mutex_lock(&fb_helper->dev->mode_config.mutex);
1631 if (!drm_fb_helper_is_bound(fb_helper)) { 1706 if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
1632 fb_helper->delayed_hotplug = true; 1707 fb_helper->delayed_hotplug = true;
1633 mutex_unlock(&fb_helper->dev->mode_config.mutex); 1708 mutex_unlock(&fb_helper->dev->mode_config.mutex);
1634 return 0; 1709 return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 021fe5d11df5..79d5221c6e41 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -38,6 +38,7 @@
38#include <linux/poll.h> 38#include <linux/poll.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/module.h> 40#include <linux/module.h>
41#include "drm_legacy.h"
41 42
42/* from BKL pushdown */ 43/* from BKL pushdown */
43DEFINE_MUTEX(drm_global_mutex); 44DEFINE_MUTEX(drm_global_mutex);
@@ -112,55 +113,12 @@ err_undo:
112EXPORT_SYMBOL(drm_open); 113EXPORT_SYMBOL(drm_open);
113 114
114/** 115/**
115 * File \c open operation.
116 *
117 * \param inode device inode.
118 * \param filp file pointer.
119 *
120 * Puts the dev->fops corresponding to the device minor number into
121 * \p filp, call the \c open method, and restore the file operations.
122 */
123int drm_stub_open(struct inode *inode, struct file *filp)
124{
125 struct drm_device *dev;
126 struct drm_minor *minor;
127 int err = -ENODEV;
128 const struct file_operations *new_fops;
129
130 DRM_DEBUG("\n");
131
132 mutex_lock(&drm_global_mutex);
133 minor = drm_minor_acquire(iminor(inode));
134 if (IS_ERR(minor))
135 goto out_unlock;
136
137 dev = minor->dev;
138 new_fops = fops_get(dev->driver->fops);
139 if (!new_fops)
140 goto out_release;
141
142 replace_fops(filp, new_fops);
143 if (filp->f_op->open)
144 err = filp->f_op->open(inode, filp);
145
146out_release:
147 drm_minor_release(minor);
148out_unlock:
149 mutex_unlock(&drm_global_mutex);
150 return err;
151}
152
153/**
154 * Check whether DRI will run on this CPU. 116 * Check whether DRI will run on this CPU.
155 * 117 *
156 * \return non-zero if the DRI will run on this CPU, or zero otherwise. 118 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
157 */ 119 */
158static int drm_cpu_valid(void) 120static int drm_cpu_valid(void)
159{ 121{
160#if defined(__i386__)
161 if (boot_cpu_data.x86 == 3)
162 return 0; /* No cmpxchg on a 386 */
163#endif
164#if defined(__sparc__) && !defined(__sparc_v9__) 122#if defined(__sparc__) && !defined(__sparc_v9__)
165 return 0; /* No cmpxchg before v9 sparc. */ 123 return 0; /* No cmpxchg before v9 sparc. */
166#endif 124#endif
@@ -203,8 +161,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
203 priv->minor = minor; 161 priv->minor = minor;
204 162
205 /* for compatibility root is always authenticated */ 163 /* for compatibility root is always authenticated */
206 priv->always_authenticated = capable(CAP_SYS_ADMIN); 164 priv->authenticated = capable(CAP_SYS_ADMIN);
207 priv->authenticated = priv->always_authenticated;
208 priv->lock_count = 0; 165 priv->lock_count = 0;
209 166
210 INIT_LIST_HEAD(&priv->lhead); 167 INIT_LIST_HEAD(&priv->lhead);
@@ -429,6 +386,10 @@ int drm_release(struct inode *inode, struct file *filp)
429 386
430 DRM_DEBUG("open_count = %d\n", dev->open_count); 387 DRM_DEBUG("open_count = %d\n", dev->open_count);
431 388
389 mutex_lock(&dev->struct_mutex);
390 list_del(&file_priv->lhead);
391 mutex_unlock(&dev->struct_mutex);
392
432 if (dev->driver->preclose) 393 if (dev->driver->preclose)
433 dev->driver->preclose(dev, file_priv); 394 dev->driver->preclose(dev, file_priv);
434 395
@@ -461,44 +422,18 @@ int drm_release(struct inode *inode, struct file *filp)
461 if (dev->driver->driver_features & DRIVER_GEM) 422 if (dev->driver->driver_features & DRIVER_GEM)
462 drm_gem_release(dev, file_priv); 423 drm_gem_release(dev, file_priv);
463 424
464 mutex_lock(&dev->ctxlist_mutex); 425 drm_legacy_ctxbitmap_flush(dev, file_priv);
465 if (!list_empty(&dev->ctxlist)) {
466 struct drm_ctx_list *pos, *n;
467
468 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
469 if (pos->tag == file_priv &&
470 pos->handle != DRM_KERNEL_CONTEXT) {
471 if (dev->driver->context_dtor)
472 dev->driver->context_dtor(dev,
473 pos->handle);
474
475 drm_ctxbitmap_free(dev, pos->handle);
476
477 list_del(&pos->head);
478 kfree(pos);
479 }
480 }
481 }
482 mutex_unlock(&dev->ctxlist_mutex);
483 426
484 mutex_lock(&dev->master_mutex); 427 mutex_lock(&dev->master_mutex);
485 428
486 if (file_priv->is_master) { 429 if (file_priv->is_master) {
487 struct drm_master *master = file_priv->master; 430 struct drm_master *master = file_priv->master;
488 struct drm_file *temp;
489
490 mutex_lock(&dev->struct_mutex);
491 list_for_each_entry(temp, &dev->filelist, lhead) {
492 if ((temp->master == file_priv->master) &&
493 (temp != file_priv))
494 temp->authenticated = temp->always_authenticated;
495 }
496 431
497 /** 432 /**
498 * Since the master is disappearing, so is the 433 * Since the master is disappearing, so is the
499 * possibility to lock. 434 * possibility to lock.
500 */ 435 */
501 436 mutex_lock(&dev->struct_mutex);
502 if (master->lock.hw_lock) { 437 if (master->lock.hw_lock) {
503 if (dev->sigdata.lock == master->lock.hw_lock) 438 if (dev->sigdata.lock == master->lock.hw_lock)
504 dev->sigdata.lock = NULL; 439 dev->sigdata.lock = NULL;
@@ -522,10 +457,6 @@ int drm_release(struct inode *inode, struct file *filp)
522 file_priv->is_master = 0; 457 file_priv->is_master = 0;
523 mutex_unlock(&dev->master_mutex); 458 mutex_unlock(&dev->master_mutex);
524 459
525 mutex_lock(&dev->struct_mutex);
526 list_del(&file_priv->lhead);
527 mutex_unlock(&dev->struct_mutex);
528
529 if (dev->driver->postclose) 460 if (dev->driver->postclose)
530 dev->driver->postclose(dev, file_priv); 461 dev->driver->postclose(dev, file_priv);
531 462
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f7d71190aad5..6adee4c2afc0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
441 * drm_gem_get_pages - helper to allocate backing pages for a GEM object 441 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
442 * from shmem 442 * from shmem
443 * @obj: obj in question 443 * @obj: obj in question
444 * @gfpmask: gfp mask of requested pages 444 *
445 * This reads the page-array of the shmem-backing storage of the given gem
446 * object. An array of pages is returned. If a page is not allocated or
447 * swapped-out, this will allocate/swap-in the required pages. Note that the
448 * whole object is covered by the page-array and pinned in memory.
449 *
450 * Use drm_gem_put_pages() to release the array and unpin all pages.
451 *
452 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
453 * If you require other GFP-masks, you have to do those allocations yourself.
454 *
455 * Note that you are not allowed to change gfp-zones during runtime. That is,
456 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
457 * set during initialization. If you have special zone constraints, set them
458 * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
459 * to keep pages in the required zone during swap-in.
445 */ 460 */
446struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) 461struct page **drm_gem_get_pages(struct drm_gem_object *obj)
447{ 462{
448 struct inode *inode;
449 struct address_space *mapping; 463 struct address_space *mapping;
450 struct page *p, **pages; 464 struct page *p, **pages;
451 int i, npages; 465 int i, npages;
452 466
453 /* This is the shared memory object that backs the GEM resource */ 467 /* This is the shared memory object that backs the GEM resource */
454 inode = file_inode(obj->filp); 468 mapping = file_inode(obj->filp)->i_mapping;
455 mapping = inode->i_mapping;
456 469
457 /* We already BUG_ON() for non-page-aligned sizes in 470 /* We already BUG_ON() for non-page-aligned sizes in
458 * drm_gem_object_init(), so we should never hit this unless 471 * drm_gem_object_init(), so we should never hit this unless
@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
466 if (pages == NULL) 479 if (pages == NULL)
467 return ERR_PTR(-ENOMEM); 480 return ERR_PTR(-ENOMEM);
468 481
469 gfpmask |= mapping_gfp_mask(mapping);
470
471 for (i = 0; i < npages; i++) { 482 for (i = 0; i < npages; i++) {
472 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); 483 p = shmem_read_mapping_page(mapping, i);
473 if (IS_ERR(p)) 484 if (IS_ERR(p))
474 goto fail; 485 goto fail;
475 pages[i] = p; 486 pages[i] = p;
@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
479 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping) 490 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
480 * so shmem can relocate pages during swapin if required. 491 * so shmem can relocate pages during swapin if required.
481 */ 492 */
482 BUG_ON((gfpmask & __GFP_DMA32) && 493 BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
483 (page_to_pfn(p) >= 0x00100000UL)); 494 (page_to_pfn(p) >= 0x00100000UL));
484 } 495 }
485 496
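/*
 * Illustrative sketch, not part of this diff: with the gfpmask parameter
 * gone, a driver that needs DMA32-safe backing pages configures the mapping
 * once up front (the exact mask here is an assumption, not from this commit).
 */
mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
		     GFP_USER | __GFP_DMA32);

pages = drm_gem_get_pages(obj);		/* pins the whole object */
if (IS_ERR(pages))
	return PTR_ERR(pages);
/* ... map for DMA, render, etc. ... */
drm_gem_put_pages(obj, pages, false, false);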
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 05c97c5350a1..e467e67af6e7 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -327,7 +327,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
327 /* Create a CMA GEM buffer. */ 327 /* Create a CMA GEM buffer. */
328 cma_obj = __drm_gem_cma_create(dev, size); 328 cma_obj = __drm_gem_cma_create(dev, size);
329 if (IS_ERR(cma_obj)) 329 if (IS_ERR(cma_obj))
330 return ERR_PTR(PTR_ERR(cma_obj)); 330 return ERR_CAST(cma_obj);
331 331
332 cma_obj->paddr = sg_dma_address(sgt->sgl); 332 cma_obj->paddr = sg_dma_address(sgt->sgl);
333 cma_obj->sgt = sgt; 333 cma_obj->sgt = sgt;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 86feedd5e6f6..ecaf0fa2eec8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -132,7 +132,7 @@ int drm_bufs_info(struct seq_file *m, void *data)
132 i, 132 i,
133 dma->bufs[i].buf_size, 133 dma->bufs[i].buf_size,
134 dma->bufs[i].buf_count, 134 dma->bufs[i].buf_count,
135 atomic_read(&dma->bufs[i].freelist.count), 135 0,
136 dma->bufs[i].seg_count, 136 dma->bufs[i].seg_count,
137 seg_pages, 137 seg_pages,
138 seg_pages * PAGE_SIZE / 1024); 138 seg_pages * PAGE_SIZE / 1024);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 69c61f392e66..40be746b7e68 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -1,11 +1,3 @@
1/**
2 * \file drm_ioctl.c
3 * IOCTL processing for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/* 1/*
10 * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com 2 * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
11 * 3 *
@@ -13,6 +5,9 @@
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved. 6 * All Rights Reserved.
15 * 7 *
8 * Author Rickard E. (Rik) Faith <faith@valinux.com>
9 * Author Gareth Hughes <gareth@valinux.com>
10 *
16 * Permission is hereby granted, free of charge, to any person obtaining a 11 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"), 12 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation 13 * to deal in the Software without restriction, including without limitation
@@ -35,6 +30,7 @@
35 30
36#include <drm/drmP.h> 31#include <drm/drmP.h>
37#include <drm/drm_core.h> 32#include <drm/drm_core.h>
33#include "drm_legacy.h"
38 34
39#include <linux/pci.h> 35#include <linux/pci.h>
40#include <linux/export.h> 36#include <linux/export.h>
@@ -42,6 +38,124 @@
42#include <asm/mtrr.h> 38#include <asm/mtrr.h>
43#endif 39#endif
44 40
41static int drm_version(struct drm_device *dev, void *data,
42 struct drm_file *file_priv);
43
44#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
45 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
46
47/** Ioctl table */
48static const struct drm_ioctl_desc drm_ioctls[] = {
49 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
50 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
51 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
52 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
53 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
54 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
55 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
56 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
57 DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
58 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
59
60 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
61 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
62 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
63 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
64
65 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
66 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
67
68 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
69 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
70
71 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
72 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
73
74 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
75 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
76 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
77 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
78 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
79 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
80 DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
81
82 DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
83 DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
84
85 DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
86 DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
87
88 DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
89
90 DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
91 DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
92 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
93 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
94 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
95 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
96
97 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
98
99#if __OS_HAS_AGP
100 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
101 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
102 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
103 DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
104 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
105 DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
106 DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
107 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
108#endif
109
110 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
111 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
112
113 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
114
115 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
116
117 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
118
119 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
120 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
121 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
122
123 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
124
125 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
126 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
127
128 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
129 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
130 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
131 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
132 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
133 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
134 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
135 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
136 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
137 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
139 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
140 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
141 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
142 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
145 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
146 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
147 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
148 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
149 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
150 DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
151 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
152 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
153 DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
154 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
155};
156
157#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
158
45/** 159/**
46 * Get the bus id. 160 * Get the bus id.
47 * 161 *
@@ -342,8 +456,6 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
342 file_priv->stereo_allowed = req->value; 456 file_priv->stereo_allowed = req->value;
343 break; 457 break;
344 case DRM_CLIENT_CAP_UNIVERSAL_PLANES: 458 case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
345 if (!drm_universal_planes)
346 return -EINVAL;
347 if (req->value > 1) 459 if (req->value > 1)
348 return -EINVAL; 460 return -EINVAL;
349 file_priv->universal_planes = req->value; 461 file_priv->universal_planes = req->value;
@@ -417,3 +529,243 @@ int drm_noop(struct drm_device *dev, void *data,
417 return 0; 529 return 0;
418} 530}
419EXPORT_SYMBOL(drm_noop); 531EXPORT_SYMBOL(drm_noop);
532
533/**
534 * Copy and IOCTL return string to user space
535 */
536static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
537{
538 int len;
539
540 /* don't overflow userbuf */
541 len = strlen(value);
542 if (len > *buf_len)
543 len = *buf_len;
544
545 /* let userspace know exact length of driver value (which could be
546 * larger than the userspace-supplied buffer) */
547 *buf_len = strlen(value);
548
549 /* finally, try filling in the userbuf */
550 if (len && buf)
551 if (copy_to_user(buf, value, len))
552 return -EFAULT;
553 return 0;
554}
555
556/**
557 * Get version information
558 *
559 * \param inode device inode.
560 * \param filp file pointer.
561 * \param cmd command.
562 * \param arg user argument, pointing to a drm_version structure.
563 * \return zero on success or negative number on failure.
564 *
565 * Fills in the version information in \p arg.
566 */
567static int drm_version(struct drm_device *dev, void *data,
568 struct drm_file *file_priv)
569{
570 struct drm_version *version = data;
571 int err;
572
573 version->version_major = dev->driver->major;
574 version->version_minor = dev->driver->minor;
575 version->version_patchlevel = dev->driver->patchlevel;
576 err = drm_copy_field(version->name, &version->name_len,
577 dev->driver->name);
578 if (!err)
579 err = drm_copy_field(version->date, &version->date_len,
580 dev->driver->date);
581 if (!err)
582 err = drm_copy_field(version->desc, &version->desc_len,
583 dev->driver->desc);
584
585 return err;
586}
587
588/**
589 * drm_ioctl_permit - Check ioctl permissions against caller
590 *
591 * @flags: ioctl permission flags.
592 * @file_priv: Pointer to struct drm_file identifying the caller.
593 *
594 * Checks whether the caller is allowed to run an ioctl with the
595 * indicated permissions. If so, returns zero. Otherwise returns an
596 * error code suitable for ioctl return.
597 */
598static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
599{
600 /* ROOT_ONLY is only for CAP_SYS_ADMIN */
601 if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
602 return -EACCES;
603
604 /* AUTH is only for authenticated or render client */
605 if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
606 !file_priv->authenticated))
607 return -EACCES;
608
609 /* MASTER is only for master or control clients */
610 if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
611 !drm_is_control_client(file_priv)))
612 return -EACCES;
613
614 /* Control clients must be explicitly allowed */
615 if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
616 drm_is_control_client(file_priv)))
617 return -EACCES;
618
619 /* Render clients must be explicitly allowed */
620 if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
621 drm_is_render_client(file_priv)))
622 return -EACCES;
623
624 return 0;
625}
626
627/**
628 * Called whenever a process performs an ioctl on /dev/drm.
629 *
630 * \param inode device inode.
631 * \param file_priv DRM file private.
632 * \param cmd command.
633 * \param arg user argument.
634 * \return zero on success or negative number on failure.
635 *
636 * Looks up the ioctl function in the ::ioctls table, checking for root
 637 * privileges if so required, and dispatches to the respective function.
638 */
639long drm_ioctl(struct file *filp,
640 unsigned int cmd, unsigned long arg)
641{
642 struct drm_file *file_priv = filp->private_data;
643 struct drm_device *dev;
644 const struct drm_ioctl_desc *ioctl = NULL;
645 drm_ioctl_t *func;
646 unsigned int nr = DRM_IOCTL_NR(cmd);
647 int retcode = -EINVAL;
648 char stack_kdata[128];
649 char *kdata = NULL;
650 unsigned int usize, asize;
651
652 dev = file_priv->minor->dev;
653
654 if (drm_device_is_unplugged(dev))
655 return -ENODEV;
656
657 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
658 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
659 goto err_i1;
660 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
661 (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
662 u32 drv_size;
663 ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
664 drv_size = _IOC_SIZE(ioctl->cmd_drv);
665 usize = asize = _IOC_SIZE(cmd);
666 if (drv_size > asize)
667 asize = drv_size;
668 cmd = ioctl->cmd_drv;
669 }
670 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
671 u32 drv_size;
672
673 ioctl = &drm_ioctls[nr];
674
675 drv_size = _IOC_SIZE(ioctl->cmd);
676 usize = asize = _IOC_SIZE(cmd);
677 if (drv_size > asize)
678 asize = drv_size;
679
680 cmd = ioctl->cmd;
681 } else
682 goto err_i1;
683
684 DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
685 task_pid_nr(current),
686 (long)old_encode_dev(file_priv->minor->kdev->devt),
687 file_priv->authenticated, ioctl->name);
688
689 /* Do not trust userspace, use our own definition */
690 func = ioctl->func;
691
692 if (unlikely(!func)) {
693 DRM_DEBUG("no function\n");
694 retcode = -EINVAL;
695 goto err_i1;
696 }
697
698 retcode = drm_ioctl_permit(ioctl->flags, file_priv);
699 if (unlikely(retcode))
700 goto err_i1;
701
702 if (cmd & (IOC_IN | IOC_OUT)) {
703 if (asize <= sizeof(stack_kdata)) {
704 kdata = stack_kdata;
705 } else {
706 kdata = kmalloc(asize, GFP_KERNEL);
707 if (!kdata) {
708 retcode = -ENOMEM;
709 goto err_i1;
710 }
711 }
712 if (asize > usize)
713 memset(kdata + usize, 0, asize - usize);
714 }
715
716 if (cmd & IOC_IN) {
717 if (copy_from_user(kdata, (void __user *)arg,
718 usize) != 0) {
719 retcode = -EFAULT;
720 goto err_i1;
721 }
722 } else if (cmd & IOC_OUT) {
723 memset(kdata, 0, usize);
724 }
725
726 if (ioctl->flags & DRM_UNLOCKED)
727 retcode = func(dev, kdata, file_priv);
728 else {
729 mutex_lock(&drm_global_mutex);
730 retcode = func(dev, kdata, file_priv);
731 mutex_unlock(&drm_global_mutex);
732 }
733
734 if (cmd & IOC_OUT) {
735 if (copy_to_user((void __user *)arg, kdata,
736 usize) != 0)
737 retcode = -EFAULT;
738 }
739
740 err_i1:
741 if (!ioctl)
742 DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
743 task_pid_nr(current),
744 (long)old_encode_dev(file_priv->minor->kdev->devt),
745 file_priv->authenticated, cmd, nr);
746
747 if (kdata != stack_kdata)
748 kfree(kdata);
749 if (retcode)
750 DRM_DEBUG("ret = %d\n", retcode);
751 return retcode;
752}
753EXPORT_SYMBOL(drm_ioctl);
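/*
 * Illustrative sketch, not part of this diff: the user-space side of the
 * dispatcher above, issuing DRM_IOCTL_VERSION against an open card node.
 */
#include <sys/ioctl.h>
#include <drm/drm.h>

static int my_query_version(int fd)
{
	struct drm_version v = { 0 };	/* zeroed lengths: fetch numbers only */

	if (ioctl(fd, DRM_IOCTL_VERSION, &v))
		return -1;
	return v.version_major;
}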
754
755/**
756 * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
757 *
758 * @nr: Ioctl number.
759 * @flags: Where to return the ioctl permission flags
760 */
761bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
762{
763 if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
764 (nr < DRM_COMMAND_BASE)) {
765 *flags = drm_ioctls[nr].flags;
766 return true;
767 }
768
769 return false;
770}
771EXPORT_SYMBOL(drm_ioctl_flags);
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
new file mode 100644
index 000000000000..d34f20a79b7c
--- /dev/null
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -0,0 +1,51 @@
1#ifndef __DRM_LEGACY_H__
2#define __DRM_LEGACY_H__
3
4/*
5 * Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com>
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26struct drm_device;
27struct drm_file;
28
29/*
30 * Generic DRM Contexts
31 */
32
33#define DRM_KERNEL_CONTEXT 0
34#define DRM_RESERVED_CONTEXTS 1
35
36int drm_legacy_ctxbitmap_init(struct drm_device *dev);
37void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
38void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
39void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
40
41int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
42int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
43int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
44int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
45int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
46int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
47
48int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
49int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
50
51#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f6452682141b..e26b59e385ff 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -35,6 +35,7 @@
35 35
36#include <linux/export.h> 36#include <linux/export.h>
37#include <drm/drmP.h> 37#include <drm/drmP.h>
38#include "drm_legacy.h"
38 39
39static int drm_notifier(void *priv); 40static int drm_notifier(void *priv);
40 41
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index e633df2f68d8..6aa6a9e95570 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -201,16 +201,15 @@ EXPORT_SYMBOL(mipi_dsi_detach);
201/** 201/**
202 * mipi_dsi_dcs_write - send DCS write command 202 * mipi_dsi_dcs_write - send DCS write command
203 * @dsi: DSI device 203 * @dsi: DSI device
204 * @channel: virtual channel
205 * @data: pointer to the command followed by parameters 204 * @data: pointer to the command followed by parameters
206 * @len: length of @data 205 * @len: length of @data
207 */ 206 */
208int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel, 207ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
209 const void *data, size_t len) 208 size_t len)
210{ 209{
211 const struct mipi_dsi_host_ops *ops = dsi->host->ops; 210 const struct mipi_dsi_host_ops *ops = dsi->host->ops;
212 struct mipi_dsi_msg msg = { 211 struct mipi_dsi_msg msg = {
213 .channel = channel, 212 .channel = dsi->channel,
214 .tx_buf = data, 213 .tx_buf = data,
215 .tx_len = len 214 .tx_len = len
216 }; 215 };
@@ -239,19 +238,18 @@ EXPORT_SYMBOL(mipi_dsi_dcs_write);
239/** 238/**
240 * mipi_dsi_dcs_read - send DCS read request command 239 * mipi_dsi_dcs_read - send DCS read request command
241 * @dsi: DSI device 240 * @dsi: DSI device
242 * @channel: virtual channel
243 * @cmd: DCS read command 241 * @cmd: DCS read command
244 * @data: pointer to read buffer 242 * @data: pointer to read buffer
245 * @len: length of @data 243 * @len: length of @data
246 * 244 *
247 * Function returns number of read bytes or error code. 245 * Function returns number of read bytes or error code.
248 */ 246 */
249ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel, 247ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
250 u8 cmd, void *data, size_t len) 248 size_t len)
251{ 249{
252 const struct mipi_dsi_host_ops *ops = dsi->host->ops; 250 const struct mipi_dsi_host_ops *ops = dsi->host->ops;
253 struct mipi_dsi_msg msg = { 251 struct mipi_dsi_msg msg = {
254 .channel = channel, 252 .channel = dsi->channel,
255 .type = MIPI_DSI_DCS_READ, 253 .type = MIPI_DSI_DCS_READ,
256 .tx_buf = &cmd, 254 .tx_buf = &cmd,
257 .tx_len = 1, 255 .tx_len = 1,
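/*
 * Illustrative sketch, not part of this diff: with the channel now taken
 * from the device, a panel driver's call shrinks to data and length.
 * my_panel_sleep_out is a hypothetical helper.
 */
static int my_panel_sleep_out(struct mipi_dsi_device *dsi)
{
	u8 cmd = 0x11;	/* MIPI_DCS_EXIT_SLEEP_MODE */
	ssize_t err = mipi_dsi_dcs_write(dsi, &cmd, sizeof(cmd));

	return err < 0 ? err : 0;
}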
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
new file mode 100644
index 000000000000..16150a00c237
--- /dev/null
+++ b/drivers/gpu/drm/drm_of.c
@@ -0,0 +1,67 @@
1#include <linux/export.h>
2#include <linux/list.h>
3#include <linux/of_graph.h>
4#include <drm/drmP.h>
5#include <drm/drm_crtc.h>
6#include <drm/drm_of.h>
7
8/**
9 * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
10 * @dev: DRM device
11 * @port: port OF node
12 *
13 * Given a port OF node, return the possible mask of the corresponding
14 * CRTC within a device's list of CRTCs. Returns zero if not found.
15 */
16static uint32_t drm_crtc_port_mask(struct drm_device *dev,
17 struct device_node *port)
18{
19 unsigned int index = 0;
20 struct drm_crtc *tmp;
21
22 list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
23 if (tmp->port == port)
24 return 1 << index;
25
26 index++;
27 }
28
29 return 0;
30}
31
32/**
33 * drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
34 * @dev: DRM device
35 * @port: encoder port to scan for endpoints
36 *
37 * Scan all endpoints attached to a port, locate their attached CRTCs,
38 * and generate the DRM mask of CRTCs which may be attached to this
39 * encoder.
40 *
41 * See Documentation/devicetree/bindings/graph.txt for the bindings.
42 */
43uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
44 struct device_node *port)
45{
46 struct device_node *remote_port, *ep = NULL;
47 uint32_t possible_crtcs = 0;
48
49 do {
50 ep = of_graph_get_next_endpoint(port, ep);
51 if (!ep)
52 break;
53
54 remote_port = of_graph_get_remote_port(ep);
55 if (!remote_port) {
56 of_node_put(ep);
57 return 0;
58 }
59
60 possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
61
62 of_node_put(remote_port);
63 } while (1);
64
65 return possible_crtcs;
66}
67EXPORT_SYMBOL(drm_of_find_possible_crtcs);
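/*
 * Illustrative sketch, not part of this diff: feeding the computed mask into
 * an encoder during DT-based setup; the "port" child lookup is an assumption
 * about the binding, per Documentation/devicetree/bindings/graph.txt.
 */
static void my_encoder_set_possible_crtcs(struct drm_device *drm,
					  struct drm_encoder *encoder,
					  struct device_node *np)
{
	struct device_node *port = of_get_child_by_name(np, "port");

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, port);
	of_node_put(port);
}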
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 6d133149cc74..827ec1a3040b 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -335,9 +335,10 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
335 } 335 }
336 336
337 /* possible_crtc's will be filled in later by crtc_init */ 337 /* possible_crtc's will be filled in later by crtc_init */
338 ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs, 338 ret = drm_universal_plane_init(dev, primary, 0,
339 formats, num_formats, 339 &drm_primary_helper_funcs,
340 DRM_PLANE_TYPE_PRIMARY); 340 formats, num_formats,
341 DRM_PLANE_TYPE_PRIMARY);
341 if (ret) { 342 if (ret) {
342 kfree(primary); 343 kfree(primary);
343 primary = NULL; 344 primary = NULL;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d22676b89cbb..db7d250f7ac7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -130,7 +130,14 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
130 count = drm_load_edid_firmware(connector); 130 count = drm_load_edid_firmware(connector);
131 if (count == 0) 131 if (count == 0)
132#endif 132#endif
133 count = (*connector_funcs->get_modes)(connector); 133 {
134 if (connector->override_edid) {
135 struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
136
137 count = drm_add_edid_modes(connector, edid);
138 } else
139 count = (*connector_funcs->get_modes)(connector);
140 }
134 141
135 if (count == 0 && connector->status == connector_status_connected) 142 if (count == 0 && connector->status == connector_status_connected)
136 count = drm_add_modes_noedid(connector, 1024, 768); 143 count = drm_add_modes_noedid(connector, 1024, 768);
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 7047ca025787..631f5afd451c 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -293,3 +293,143 @@ void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
293 DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1); 293 DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
294} 294}
295EXPORT_SYMBOL(drm_rect_debug_print); 295EXPORT_SYMBOL(drm_rect_debug_print);
296
297/**
298 * drm_rect_rotate - Rotate the rectangle
299 * @r: rectangle to be rotated
300 * @width: Width of the coordinate space
301 * @height: Height of the coordinate space
302 * @rotation: Transformation to be applied
303 *
304 * Apply @rotation to the coordinates of rectangle @r.
305 *
306 * @width and @height combined with @rotation define
307 * the location of the new origin.
308 *
 309 * @width corresponds to the horizontal and @height
310 * to the vertical axis of the untransformed coordinate
311 * space.
312 */
313void drm_rect_rotate(struct drm_rect *r,
314 int width, int height,
315 unsigned int rotation)
316{
317 struct drm_rect tmp;
318
319 if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
320 tmp = *r;
321
322 if (rotation & BIT(DRM_REFLECT_X)) {
323 r->x1 = width - tmp.x2;
324 r->x2 = width - tmp.x1;
325 }
326
327 if (rotation & BIT(DRM_REFLECT_Y)) {
328 r->y1 = height - tmp.y2;
329 r->y2 = height - tmp.y1;
330 }
331 }
332
333 switch (rotation & 0xf) {
334 case BIT(DRM_ROTATE_0):
335 break;
336 case BIT(DRM_ROTATE_90):
337 tmp = *r;
338 r->x1 = tmp.y1;
339 r->x2 = tmp.y2;
340 r->y1 = width - tmp.x2;
341 r->y2 = width - tmp.x1;
342 break;
343 case BIT(DRM_ROTATE_180):
344 tmp = *r;
345 r->x1 = width - tmp.x2;
346 r->x2 = width - tmp.x1;
347 r->y1 = height - tmp.y2;
348 r->y2 = height - tmp.y1;
349 break;
350 case BIT(DRM_ROTATE_270):
351 tmp = *r;
352 r->x1 = height - tmp.y2;
353 r->x2 = height - tmp.y1;
354 r->y1 = tmp.x1;
355 r->y2 = tmp.x2;
356 break;
357 default:
358 break;
359 }
360}
361EXPORT_SYMBOL(drm_rect_rotate);
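/*
 * Illustrative worked example, not part of this diff: a 4x2 rectangle at the
 * origin, rotated 90 degrees inside a 10x10 space, per the code above:
 * x1 = tmp.y1 = 0, x2 = tmp.y2 = 2, y1 = 10 - 4 = 6, y2 = 10 - 0 = 10.
 */
struct drm_rect r = { .x1 = 0, .y1 = 0, .x2 = 4, .y2 = 2 };

drm_rect_rotate(&r, 10, 10, BIT(DRM_ROTATE_90));
/* r is now { .x1 = 0, .y1 = 6, .x2 = 2, .y2 = 10 } */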
362
363/**
364 * drm_rect_rotate_inv - Inverse rotate the rectangle
365 * @r: rectangle to be rotated
366 * @width: Width of the coordinate space
367 * @height: Height of the coordinate space
368 * @rotation: Transformation whose inverse is to be applied
369 *
370 * Apply the inverse of @rotation to the coordinates
371 * of rectangle @r.
372 *
373 * @width and @height combined with @rotation define
374 * the location of the new origin.
375 *
 376 * @width corresponds to the horizontal and @height
 377 * to the vertical axis of the original untransformed
 378 * coordinate space, so that you never have to flip
 379 * them when doing a rotation and its inverse.
380 * That is, if you do:
381 *
 382 * drm_rect_rotate(&r, width, height, rotation);
 383 * drm_rect_rotate_inv(&r, width, height, rotation);
384 *
385 * you will always get back the original rectangle.
386 */
387void drm_rect_rotate_inv(struct drm_rect *r,
388 int width, int height,
389 unsigned int rotation)
390{
391 struct drm_rect tmp;
392
393 switch (rotation & 0xf) {
394 case BIT(DRM_ROTATE_0):
395 break;
396 case BIT(DRM_ROTATE_90):
397 tmp = *r;
398 r->x1 = width - tmp.y2;
399 r->x2 = width - tmp.y1;
400 r->y1 = tmp.x1;
401 r->y2 = tmp.x2;
402 break;
403 case BIT(DRM_ROTATE_180):
404 tmp = *r;
405 r->x1 = width - tmp.x2;
406 r->x2 = width - tmp.x1;
407 r->y1 = height - tmp.y2;
408 r->y2 = height - tmp.y1;
409 break;
410 case BIT(DRM_ROTATE_270):
411 tmp = *r;
412 r->x1 = tmp.y1;
413 r->x2 = tmp.y2;
414 r->y1 = height - tmp.x2;
415 r->y2 = height - tmp.x1;
416 break;
417 default:
418 break;
419 }
420
421 if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
422 tmp = *r;
423
424 if (rotation & BIT(DRM_REFLECT_X)) {
425 r->x1 = width - tmp.x2;
426 r->x2 = width - tmp.x1;
427 }
428
429 if (rotation & BIT(DRM_REFLECT_Y)) {
430 r->y1 = height - tmp.y2;
431 r->y2 = height - tmp.y1;
432 }
433 }
434}
435EXPORT_SYMBOL(drm_rect_rotate_inv);
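Per the kernel-doc above, a rotate followed by an inverse rotate with the same parameters is the identity; a sketch (values hypothetical):

	struct drm_rect r = { .x1 = 100, .y1 = 50, .x2 = 200, .y2 = 150 };
	unsigned int rotation = BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_X);

	drm_rect_rotate(&r, 1920, 1080, rotation);
	drm_rect_rotate_inv(&r, 1920, 1080, rotation);
	/* r is back to { .x1 = 100, .y1 = 50, .x2 = 200, .y2 = 150 } */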
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
deleted file mode 100644
index 14d16464000a..000000000000
--- a/drivers/gpu/drm/drm_stub.c
+++ /dev/null
@@ -1,805 +0,0 @@
1/*
2 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3 *
4 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5 * All Rights Reserved.
6 *
7 * Author Rickard E. (Rik) Faith <faith@valinux.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 */
28
29#include <linux/fs.h>
30#include <linux/module.h>
31#include <linux/moduleparam.h>
32#include <linux/mount.h>
33#include <linux/slab.h>
34#include <drm/drmP.h>
35#include <drm/drm_core.h>
36
37unsigned int drm_debug = 0; /* 1 to enable debug output */
38EXPORT_SYMBOL(drm_debug);
39
40unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
41EXPORT_SYMBOL(drm_rnodes);
42
43/* 1 to allow user space to request universal planes (experimental) */
44unsigned int drm_universal_planes = 0;
45EXPORT_SYMBOL(drm_universal_planes);
46
47unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
48EXPORT_SYMBOL(drm_vblank_offdelay);
49
50unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
51EXPORT_SYMBOL(drm_timestamp_precision);
52
53/*
54 * Default to use monotonic timestamps for wait-for-vblank and page-flip
55 * complete events.
56 */
57unsigned int drm_timestamp_monotonic = 1;
58
59MODULE_AUTHOR(CORE_AUTHOR);
60MODULE_DESCRIPTION(CORE_DESC);
61MODULE_LICENSE("GPL and additional rights");
62MODULE_PARM_DESC(debug, "Enable debug output");
63MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
64MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
65MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
66MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
67
68module_param_named(debug, drm_debug, int, 0600);
69module_param_named(rnodes, drm_rnodes, int, 0600);
70module_param_named(universal_planes, drm_universal_planes, int, 0600);
71module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
72module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
73module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
74
75static DEFINE_SPINLOCK(drm_minor_lock);
76struct idr drm_minors_idr;
77
78struct class *drm_class;
79struct dentry *drm_debugfs_root;
80
81int drm_err(const char *func, const char *format, ...)
82{
83 struct va_format vaf;
84 va_list args;
85 int r;
86
87 va_start(args, format);
88
89 vaf.fmt = format;
90 vaf.va = &args;
91
92 r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
93
94 va_end(args);
95
96 return r;
97}
98EXPORT_SYMBOL(drm_err);
99
100void drm_ut_debug_printk(const char *function_name, const char *format, ...)
101{
102 struct va_format vaf;
103 va_list args;
104
105 va_start(args, format);
106 vaf.fmt = format;
107 vaf.va = &args;
108
109 printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
110
111 va_end(args);
112}
113EXPORT_SYMBOL(drm_ut_debug_printk);
114
115struct drm_master *drm_master_create(struct drm_minor *minor)
116{
117 struct drm_master *master;
118
119 master = kzalloc(sizeof(*master), GFP_KERNEL);
120 if (!master)
121 return NULL;
122
123 kref_init(&master->refcount);
124 spin_lock_init(&master->lock.spinlock);
125 init_waitqueue_head(&master->lock.lock_queue);
126 if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
127 kfree(master);
128 return NULL;
129 }
130 INIT_LIST_HEAD(&master->magicfree);
131 master->minor = minor;
132
133 return master;
134}
135
136struct drm_master *drm_master_get(struct drm_master *master)
137{
138 kref_get(&master->refcount);
139 return master;
140}
141EXPORT_SYMBOL(drm_master_get);
142
143static void drm_master_destroy(struct kref *kref)
144{
145 struct drm_master *master = container_of(kref, struct drm_master, refcount);
146 struct drm_magic_entry *pt, *next;
147 struct drm_device *dev = master->minor->dev;
148 struct drm_map_list *r_list, *list_temp;
149
150 mutex_lock(&dev->struct_mutex);
151 if (dev->driver->master_destroy)
152 dev->driver->master_destroy(dev, master);
153
154 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
155 if (r_list->master == master) {
156 drm_rmmap_locked(dev, r_list->map);
157 r_list = NULL;
158 }
159 }
160
161 if (master->unique) {
162 kfree(master->unique);
163 master->unique = NULL;
164 master->unique_len = 0;
165 }
166
167 list_for_each_entry_safe(pt, next, &master->magicfree, head) {
168 list_del(&pt->head);
169 drm_ht_remove_item(&master->magiclist, &pt->hash_item);
170 kfree(pt);
171 }
172
173 drm_ht_remove(&master->magiclist);
174
175 mutex_unlock(&dev->struct_mutex);
176 kfree(master);
177}
178
179void drm_master_put(struct drm_master **master)
180{
181 kref_put(&(*master)->refcount, drm_master_destroy);
182 *master = NULL;
183}
184EXPORT_SYMBOL(drm_master_put);
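The master refcount follows the usual kref get/put pairing; a sketch of the intended usage (not code from this file):

	struct drm_master *master;

	master = drm_master_get(file_priv->master); /* take an extra reference */
	/* ... use master ... */
	drm_master_put(&master); /* drop it; the pointer is reset to NULL */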
185
186int drm_setmaster_ioctl(struct drm_device *dev, void *data,
187 struct drm_file *file_priv)
188{
189 int ret = 0;
190
191 mutex_lock(&dev->master_mutex);
192 if (file_priv->is_master)
193 goto out_unlock;
194
195 if (file_priv->minor->master) {
196 ret = -EINVAL;
197 goto out_unlock;
198 }
199
200 if (!file_priv->master) {
201 ret = -EINVAL;
202 goto out_unlock;
203 }
204
205 file_priv->minor->master = drm_master_get(file_priv->master);
206 file_priv->is_master = 1;
207 if (dev->driver->master_set) {
208 ret = dev->driver->master_set(dev, file_priv, false);
209 if (unlikely(ret != 0)) {
210 file_priv->is_master = 0;
211 drm_master_put(&file_priv->minor->master);
212 }
213 }
214
215out_unlock:
216 mutex_unlock(&dev->master_mutex);
217 return ret;
218}
219
220int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
221 struct drm_file *file_priv)
222{
223 int ret = -EINVAL;
224
225 mutex_lock(&dev->master_mutex);
226 if (!file_priv->is_master)
227 goto out_unlock;
228
229 if (!file_priv->minor->master)
230 goto out_unlock;
231
232 ret = 0;
233 if (dev->driver->master_drop)
234 dev->driver->master_drop(dev, file_priv, false);
235 drm_master_put(&file_priv->minor->master);
236 file_priv->is_master = 0;
237
238out_unlock:
239 mutex_unlock(&dev->master_mutex);
240 return ret;
241}
242
243/*
244 * DRM Minors
245 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
246 * of them is represented by a drm_minor object. Depending on the capabilities
247 * of the device-driver, different interfaces are registered.
248 *
249 * Minors can be accessed via dev->$minor_name. This pointer is either
250 * NULL or a valid drm_minor pointer and stays valid as long as the device is
251 * valid. This means, DRM minors have the same life-time as the underlying
252 * device. However, this doesn't mean that the minor is active. Minors are
253 * registered and unregistered dynamically according to device-state.
254 */
255
256static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
257 unsigned int type)
258{
259 switch (type) {
260 case DRM_MINOR_LEGACY:
261 return &dev->primary;
262 case DRM_MINOR_RENDER:
263 return &dev->render;
264 case DRM_MINOR_CONTROL:
265 return &dev->control;
266 default:
267 return NULL;
268 }
269}
270
271static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
272{
273 struct drm_minor *minor;
274
275 minor = kzalloc(sizeof(*minor), GFP_KERNEL);
276 if (!minor)
277 return -ENOMEM;
278
279 minor->type = type;
280 minor->dev = dev;
281
282 *drm_minor_get_slot(dev, type) = minor;
283 return 0;
284}
285
286static void drm_minor_free(struct drm_device *dev, unsigned int type)
287{
288 struct drm_minor **slot;
289
290 slot = drm_minor_get_slot(dev, type);
291 if (*slot) {
292 drm_mode_group_destroy(&(*slot)->mode_group);
293 kfree(*slot);
294 *slot = NULL;
295 }
296}
297
298static int drm_minor_register(struct drm_device *dev, unsigned int type)
299{
300 struct drm_minor *new_minor;
301 unsigned long flags;
302 int ret;
303 int minor_id;
304
305 DRM_DEBUG("\n");
306
307 new_minor = *drm_minor_get_slot(dev, type);
308 if (!new_minor)
309 return 0;
310
311 idr_preload(GFP_KERNEL);
312 spin_lock_irqsave(&drm_minor_lock, flags);
313 minor_id = idr_alloc(&drm_minors_idr,
314 NULL,
315 64 * type,
316 64 * (type + 1),
317 GFP_NOWAIT);
318 spin_unlock_irqrestore(&drm_minor_lock, flags);
319 idr_preload_end();
320
321 if (minor_id < 0)
322 return minor_id;
323
324 new_minor->index = minor_id;
325
326 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
327 if (ret) {
328 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
329 goto err_id;
330 }
331
332 ret = drm_sysfs_device_add(new_minor);
333 if (ret) {
334 DRM_ERROR("DRM: Error sysfs_device_add.\n");
335 goto err_debugfs;
336 }
337
338 /* replace NULL with @minor so lookups will succeed from now on */
339 spin_lock_irqsave(&drm_minor_lock, flags);
340 idr_replace(&drm_minors_idr, new_minor, new_minor->index);
341 spin_unlock_irqrestore(&drm_minor_lock, flags);
342
343 DRM_DEBUG("new minor assigned %d\n", minor_id);
344 return 0;
345
346err_debugfs:
347 drm_debugfs_cleanup(new_minor);
348err_id:
349 spin_lock_irqsave(&drm_minor_lock, flags);
350 idr_remove(&drm_minors_idr, minor_id);
351 spin_unlock_irqrestore(&drm_minor_lock, flags);
352 new_minor->index = 0;
353 return ret;
354}
355
356static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
357{
358 struct drm_minor *minor;
359 unsigned long flags;
360
361 minor = *drm_minor_get_slot(dev, type);
362 if (!minor || !minor->kdev)
363 return;
364
365 spin_lock_irqsave(&drm_minor_lock, flags);
366 idr_remove(&drm_minors_idr, minor->index);
367 spin_unlock_irqrestore(&drm_minor_lock, flags);
368 minor->index = 0;
369
370 drm_debugfs_cleanup(minor);
371 drm_sysfs_device_remove(minor);
372}
373
374/**
375 * drm_minor_acquire - Acquire a DRM minor
376 * @minor_id: Minor ID of the DRM-minor
377 *
378 * Looks up the given minor-ID and returns the respective DRM-minor object. The
379 * reference-count of the underlying device is increased so you must release this
380 * object with drm_minor_release().
381 *
382 * As long as you hold this minor, it is guaranteed that the object and the
383 * minor->dev pointer will stay valid! However, the device may get unplugged and
384 * unregistered while you hold the minor.
385 *
386 * Returns:
387 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
388 * failure.
389 */
390struct drm_minor *drm_minor_acquire(unsigned int minor_id)
391{
392 struct drm_minor *minor;
393 unsigned long flags;
394
395 spin_lock_irqsave(&drm_minor_lock, flags);
396 minor = idr_find(&drm_minors_idr, minor_id);
397 if (minor)
398 drm_dev_ref(minor->dev);
399 spin_unlock_irqrestore(&drm_minor_lock, flags);
400
401 if (!minor) {
402 return ERR_PTR(-ENODEV);
403 } else if (drm_device_is_unplugged(minor->dev)) {
404 drm_dev_unref(minor->dev);
405 return ERR_PTR(-ENODEV);
406 }
407
408 return minor;
409}
410
411/**
412 * drm_minor_release - Release DRM minor
413 * @minor: Pointer to DRM minor object
414 *
415 * Release a minor that was previously acquired via drm_minor_acquire().
416 */
417void drm_minor_release(struct drm_minor *minor)
418{
419 drm_dev_unref(minor->dev);
420}
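Taken together, the acquire/release pair is used like this (a sketch; minor_id is whatever ID the caller looked up):

	struct drm_minor *minor;

	minor = drm_minor_acquire(minor_id);
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	/* minor and minor->dev are guaranteed to stay valid here */

	drm_minor_release(minor);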
421
422/**
423 * drm_put_dev - Unregister and release a DRM device
424 * @dev: DRM device
425 *
426 * Called at module unload time or when a PCI device is unplugged.
427 *
428 * Use of this function is discouraged. It will eventually go away completely.
429 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
430 *
431 * Cleans up the DRM device, calling drm_lastclose().
432 */
433void drm_put_dev(struct drm_device *dev)
434{
435 DRM_DEBUG("\n");
436
437 if (!dev) {
438 DRM_ERROR("cleanup called no dev\n");
439 return;
440 }
441
442 drm_dev_unregister(dev);
443 drm_dev_unref(dev);
444}
445EXPORT_SYMBOL(drm_put_dev);
446
447void drm_unplug_dev(struct drm_device *dev)
448{
449 /* for a USB device */
450 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
451 drm_minor_unregister(dev, DRM_MINOR_RENDER);
452 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
453
454 mutex_lock(&drm_global_mutex);
455
456 drm_device_set_unplugged(dev);
457
458 if (dev->open_count == 0) {
459 drm_put_dev(dev);
460 }
461 mutex_unlock(&drm_global_mutex);
462}
463EXPORT_SYMBOL(drm_unplug_dev);
464
465/*
466 * DRM internal mount
467 * We want to be able to allocate our own "struct address_space" to control
468 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
469 * stand-alone address_space objects, so we need an underlying inode. As there
470 * is no way to allocate an independent inode easily, we need a fake internal
471 * VFS mount-point.
472 *
473 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
474 * frees it again. You are allowed to use iget() and iput() to get references to
475 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
476 * drm_fs_inode_free() call (which does not have to be the last iput()).
477 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
478 * between multiple inode-users. You could, technically, call
479 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
480 * iput(), but this way you'd end up with a new vfsmount for each inode.
481 */
482
483static int drm_fs_cnt;
484static struct vfsmount *drm_fs_mnt;
485
486static const struct dentry_operations drm_fs_dops = {
487 .d_dname = simple_dname,
488};
489
490static const struct super_operations drm_fs_sops = {
491 .statfs = simple_statfs,
492};
493
494static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
495 const char *dev_name, void *data)
496{
497 return mount_pseudo(fs_type,
498 "drm:",
499 &drm_fs_sops,
500 &drm_fs_dops,
501 0x010203ff);
502}
503
504static struct file_system_type drm_fs_type = {
505 .name = "drm",
506 .owner = THIS_MODULE,
507 .mount = drm_fs_mount,
508 .kill_sb = kill_anon_super,
509};
510
511static struct inode *drm_fs_inode_new(void)
512{
513 struct inode *inode;
514 int r;
515
516 r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
517 if (r < 0) {
518 DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
519 return ERR_PTR(r);
520 }
521
522 inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
523 if (IS_ERR(inode))
524 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
525
526 return inode;
527}
528
529static void drm_fs_inode_free(struct inode *inode)
530{
531 if (inode) {
532 iput(inode);
533 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
534 }
535}
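A sketch of the pairing described in the comment above (error handling elided):

	struct inode *inode;

	inode = drm_fs_inode_new();
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* ... use inode->i_mapping as the backing address_space ... */

	drm_fs_inode_free(inode);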
536
537/**
538 * drm_dev_alloc - Allocate new DRM device
539 * @driver: DRM driver to allocate device for
540 * @parent: Parent device object
541 *
542 * Allocate and initialize a new DRM device. No device registration is done.
543 * Call drm_dev_register() to advertise the device to user space and register it
544 * with other core subsystems.
545 *
546 * The initial ref-count of the object is 1. Use drm_dev_ref() and
547 * drm_dev_unref() to take and drop further ref-counts.
548 *
549 * RETURNS:
550 * Pointer to new DRM device, or NULL if out of memory.
551 */
552struct drm_device *drm_dev_alloc(struct drm_driver *driver,
553 struct device *parent)
554{
555 struct drm_device *dev;
556 int ret;
557
558 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
559 if (!dev)
560 return NULL;
561
562 kref_init(&dev->ref);
563 dev->dev = parent;
564 dev->driver = driver;
565
566 INIT_LIST_HEAD(&dev->filelist);
567 INIT_LIST_HEAD(&dev->ctxlist);
568 INIT_LIST_HEAD(&dev->vmalist);
569 INIT_LIST_HEAD(&dev->maplist);
570 INIT_LIST_HEAD(&dev->vblank_event_list);
571
572 spin_lock_init(&dev->buf_lock);
573 spin_lock_init(&dev->event_lock);
574 mutex_init(&dev->struct_mutex);
575 mutex_init(&dev->ctxlist_mutex);
576 mutex_init(&dev->master_mutex);
577
578 dev->anon_inode = drm_fs_inode_new();
579 if (IS_ERR(dev->anon_inode)) {
580 ret = PTR_ERR(dev->anon_inode);
581 DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
582 goto err_free;
583 }
584
585 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
586 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
587 if (ret)
588 goto err_minors;
589 }
590
591 if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
592 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
593 if (ret)
594 goto err_minors;
595 }
596
597 ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
598 if (ret)
599 goto err_minors;
600
601 if (drm_ht_create(&dev->map_hash, 12))
602 goto err_minors;
603
604 ret = drm_ctxbitmap_init(dev);
605 if (ret) {
606 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
607 goto err_ht;
608 }
609
610 if (driver->driver_features & DRIVER_GEM) {
611 ret = drm_gem_init(dev);
612 if (ret) {
613 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
614 goto err_ctxbitmap;
615 }
616 }
617
618 return dev;
619
620err_ctxbitmap:
621 drm_ctxbitmap_cleanup(dev);
622err_ht:
623 drm_ht_remove(&dev->map_hash);
624err_minors:
625 drm_minor_free(dev, DRM_MINOR_LEGACY);
626 drm_minor_free(dev, DRM_MINOR_RENDER);
627 drm_minor_free(dev, DRM_MINOR_CONTROL);
628 drm_fs_inode_free(dev->anon_inode);
629err_free:
630 mutex_destroy(&dev->master_mutex);
631 kfree(dev);
632 return NULL;
633}
634EXPORT_SYMBOL(drm_dev_alloc);
635
636static void drm_dev_release(struct kref *ref)
637{
638 struct drm_device *dev = container_of(ref, struct drm_device, ref);
639
640 if (dev->driver->driver_features & DRIVER_GEM)
641 drm_gem_destroy(dev);
642
643 drm_ctxbitmap_cleanup(dev);
644 drm_ht_remove(&dev->map_hash);
645 drm_fs_inode_free(dev->anon_inode);
646
647 drm_minor_free(dev, DRM_MINOR_LEGACY);
648 drm_minor_free(dev, DRM_MINOR_RENDER);
649 drm_minor_free(dev, DRM_MINOR_CONTROL);
650
651 mutex_destroy(&dev->master_mutex);
652 kfree(dev->unique);
653 kfree(dev);
654}
655
656/**
657 * drm_dev_ref - Take reference of a DRM device
658 * @dev: device to take reference of or NULL
659 *
660 * This increases the ref-count of @dev by one. You *must* already own a
661 * reference when calling this. Use drm_dev_unref() to drop this reference
662 * again.
663 *
664 * This function never fails. However, this function does not provide *any*
665 * guarantee whether the device is alive or running. It only provides a
666 * reference to the object and the memory associated with it.
667 */
668void drm_dev_ref(struct drm_device *dev)
669{
670 if (dev)
671 kref_get(&dev->ref);
672}
673EXPORT_SYMBOL(drm_dev_ref);
674
675/**
676 * drm_dev_unref - Drop reference of a DRM device
677 * @dev: device to drop reference of or NULL
678 *
679 * This decreases the ref-count of @dev by one. The device is destroyed if the
680 * ref-count drops to zero.
681 */
682void drm_dev_unref(struct drm_device *dev)
683{
684 if (dev)
685 kref_put(&dev->ref, drm_dev_release);
686}
687EXPORT_SYMBOL(drm_dev_unref);
688
689/**
690 * drm_dev_register - Register DRM device
691 * @dev: Device to register
692 * @flags: Flags passed to the driver's .load() function
693 *
694 * Register the DRM device @dev with the system, advertise the device to user-space
695 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
696 * previously.
697 *
698 * Never call this twice on any device!
699 *
700 * RETURNS:
701 * 0 on success, negative error code on failure.
702 */
703int drm_dev_register(struct drm_device *dev, unsigned long flags)
704{
705 int ret;
706
707 mutex_lock(&drm_global_mutex);
708
709 ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
710 if (ret)
711 goto err_minors;
712
713 ret = drm_minor_register(dev, DRM_MINOR_RENDER);
714 if (ret)
715 goto err_minors;
716
717 ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
718 if (ret)
719 goto err_minors;
720
721 if (dev->driver->load) {
722 ret = dev->driver->load(dev, flags);
723 if (ret)
724 goto err_minors;
725 }
726
727 /* setup grouping for legacy outputs */
728 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
729 ret = drm_mode_group_init_legacy_group(dev,
730 &dev->primary->mode_group);
731 if (ret)
732 goto err_unload;
733 }
734
735 ret = 0;
736 goto out_unlock;
737
738err_unload:
739 if (dev->driver->unload)
740 dev->driver->unload(dev);
741err_minors:
742 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
743 drm_minor_unregister(dev, DRM_MINOR_RENDER);
744 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
745out_unlock:
746 mutex_unlock(&drm_global_mutex);
747 return ret;
748}
749EXPORT_SYMBOL(drm_dev_register);
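A minimal sketch of the alloc/register lifecycle from a driver probe path (example_driver and example_probe are hypothetical names):

	static int example_probe(struct platform_device *pdev)
	{
		struct drm_device *dev;
		int ret;

		dev = drm_dev_alloc(&example_driver, &pdev->dev);
		if (!dev)
			return -ENOMEM;

		ret = drm_dev_register(dev, 0);
		if (ret)
			drm_dev_unref(dev); /* drops the initial reference */

		return ret;
	}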
750
751/**
752 * drm_dev_unregister - Unregister DRM device
753 * @dev: Device to unregister
754 *
755 * Unregister the DRM device from the system. This does the reverse of
756 * drm_dev_register() but does not deallocate the device. The caller must call
757 * drm_dev_unref() to drop their final reference.
758 */
759void drm_dev_unregister(struct drm_device *dev)
760{
761 struct drm_map_list *r_list, *list_temp;
762
763 drm_lastclose(dev);
764
765 if (dev->driver->unload)
766 dev->driver->unload(dev);
767
768 if (dev->agp)
769 drm_pci_agp_destroy(dev);
770
771 drm_vblank_cleanup(dev);
772
773 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
774 drm_rmmap(dev, r_list->map);
775
776 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
777 drm_minor_unregister(dev, DRM_MINOR_RENDER);
778 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
779}
780EXPORT_SYMBOL(drm_dev_unregister);
781
782/**
783 * drm_dev_set_unique - Set the unique name of a DRM device
784 * @dev: device of which to set the unique name
785 * @fmt: format string for unique name
786 *
787 * Sets the unique name of a DRM device using the specified format string and
788 * a variable list of arguments. Drivers can use this at driver probe time if
789 * the unique name of the devices they drive is static.
790 *
791 * Return: 0 on success or a negative error code on failure.
792 */
793int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
794{
795 va_list ap;
796
797 kfree(dev->unique);
798
799 va_start(ap, fmt);
800 dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
801 va_end(ap);
802
803 return dev->unique ? 0 : -ENOMEM;
804}
805EXPORT_SYMBOL(drm_dev_set_unique);
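A sketch of typical usage at probe time (the format string shown is illustrative, not mandated by the interface):

	ret = drm_dev_set_unique(dev, "platform:%s", dev_name(&pdev->dev));
	if (ret)
		return ret;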
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 369b26278e76..ab1a5f6dde8a 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -438,7 +438,6 @@ err_out_files:
438out: 438out:
439 return ret; 439 return ret;
440} 440}
441EXPORT_SYMBOL(drm_sysfs_connector_add);
442 441
443/** 442/**
444 * drm_sysfs_connector_remove - remove a connector device from sysfs 443
@@ -468,7 +467,6 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
468 device_unregister(connector->kdev); 467 device_unregister(connector->kdev);
469 connector->kdev = NULL; 468 connector->kdev = NULL;
470} 469}
471EXPORT_SYMBOL(drm_sysfs_connector_remove);
472 470
473/** 471/**
474 * drm_sysfs_hotplug_event - generate a DRM uevent 472 * drm_sysfs_hotplug_event - generate a DRM uevent
@@ -495,71 +493,55 @@ static void drm_sysfs_release(struct device *dev)
495} 493}
496 494
497/** 495/**
498 * drm_sysfs_device_add - adds a class device to sysfs for a character driver 496 * drm_sysfs_minor_alloc() - Allocate sysfs device for given minor
499 * @dev: DRM device to be added 497 * @minor: minor to allocate sysfs device for
500 * @head: DRM head in question
501 * 498 *
502 * Add a DRM device to the DRM's device model class. We use @dev's PCI device 499 * This allocates a new sysfs device for @minor and returns it. The device is
503 * as the parent for the Linux device, and make sure it has a file containing 500 * not registered nor linked. The caller has to use device_add() and
504 * the driver we're using (for userspace compatibility). 501 * device_del() to register and unregister it.
502 *
503 * Note that dev_get_drvdata() on the new device will return the minor.
504 * However, the device does not hold a ref-count to the minor nor to the
505 * underlying drm_device. This is unproblematic as long as you access the
506 * private data only in sysfs callbacks. device_del() disables those
507 * synchronously, so they cannot be called after you cleanup a minor.
505 */ 508 */
506int drm_sysfs_device_add(struct drm_minor *minor) 509struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
507{ 510{
508 char *minor_str; 511 const char *minor_str;
512 struct device *kdev;
509 int r; 513 int r;
510 514
511 if (minor->type == DRM_MINOR_CONTROL) 515 if (minor->type == DRM_MINOR_CONTROL)
512 minor_str = "controlD%d"; 516 minor_str = "controlD%d";
513 else if (minor->type == DRM_MINOR_RENDER) 517 else if (minor->type == DRM_MINOR_RENDER)
514 minor_str = "renderD%d"; 518 minor_str = "renderD%d";
515 else 519 else
516 minor_str = "card%d"; 520 minor_str = "card%d";
517 521
518 minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL); 522 kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
519 if (!minor->kdev) { 523 if (!kdev)
520 r = -ENOMEM; 524 return ERR_PTR(-ENOMEM);
521 goto error; 525
522 } 526 device_initialize(kdev);
523 527 kdev->devt = MKDEV(DRM_MAJOR, minor->index);
524 device_initialize(minor->kdev); 528 kdev->class = drm_class;
525 minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index); 529 kdev->type = &drm_sysfs_device_minor;
526 minor->kdev->class = drm_class; 530 kdev->parent = minor->dev->dev;
527 minor->kdev->type = &drm_sysfs_device_minor; 531 kdev->release = drm_sysfs_release;
528 minor->kdev->parent = minor->dev->dev; 532 dev_set_drvdata(kdev, minor);
529 minor->kdev->release = drm_sysfs_release; 533
530 dev_set_drvdata(minor->kdev, minor); 534 r = dev_set_name(kdev, minor_str, minor->index);
531
532 r = dev_set_name(minor->kdev, minor_str, minor->index);
533 if (r < 0) 535 if (r < 0)
534 goto error; 536 goto err_free;
535
536 r = device_add(minor->kdev);
537 if (r < 0)
538 goto error;
539
540 return 0;
541 537
542error: 538 return kdev;
543 DRM_ERROR("device create failed %d\n", r);
544 put_device(minor->kdev);
545 return r;
546}
547 539
548/** 540err_free:
549 * drm_sysfs_device_remove - remove DRM device 541 put_device(kdev);
550 * @dev: DRM device to remove 542 return ERR_PTR(r);
551 *
552 * This call unregisters and cleans up a class device that was created with a
553 * call to drm_sysfs_device_add()
554 */
555void drm_sysfs_device_remove(struct drm_minor *minor)
556{
557 if (minor->kdev)
558 device_unregister(minor->kdev);
559 minor->kdev = NULL;
560} 543}
561 544
562
563/** 545/**
564 * drm_class_device_register - Register a struct device in the drm class. 546 * drm_class_device_register - Register a struct device in the drm class.
565 * 547 *
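Following the new kernel-doc for drm_sysfs_minor_alloc(), the caller pairs the allocation with device_add()/device_del() and a final put_device(); a sketch:

	struct device *kdev;
	int ret;

	kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(kdev))
		return PTR_ERR(kdev);

	ret = device_add(kdev);
	if (ret) {
		put_device(kdev);
		return ret;
	}

	/* ... later, on unregister ... */
	device_del(kdev);
	put_device(kdev);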
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 178d2a9672a8..7f9f6f9e9b7e 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -28,6 +28,7 @@ config DRM_EXYNOS_FIMD
28 bool "Exynos DRM FIMD" 28 bool "Exynos DRM FIMD"
29 depends on DRM_EXYNOS && !FB_S3C 29 depends on DRM_EXYNOS && !FB_S3C
30 select FB_MODE_HELPERS 30 select FB_MODE_HELPERS
31 select MFD_SYSCON
31 help 32 help
32 Choose this option if you want to use Exynos FIMD for DRM. 33 Choose this option if you want to use Exynos FIMD for DRM.
33 34
@@ -52,6 +53,7 @@ config DRM_EXYNOS_DP
52 bool "EXYNOS DRM DP driver support" 53 bool "EXYNOS DRM DP driver support"
53 depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) 54 depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
54 default DRM_EXYNOS 55 default DRM_EXYNOS
56 select DRM_PANEL
55 help 57 help
56 This enables support for DP device. 58 This enables support for DP device.
57 59
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index a8ffc8c1477b..4f3c7eb2d37d 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -16,7 +16,6 @@
16#include <linux/clk.h> 16#include <linux/clk.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <linux/of.h> 19#include <linux/of.h>
21#include <linux/of_gpio.h> 20#include <linux/of_gpio.h>
22#include <linux/gpio.h> 21#include <linux/gpio.h>
@@ -28,6 +27,7 @@
28#include <drm/drmP.h> 27#include <drm/drmP.h>
29#include <drm/drm_crtc.h> 28#include <drm/drm_crtc.h>
30#include <drm/drm_crtc_helper.h> 29#include <drm/drm_crtc_helper.h>
30#include <drm/drm_panel.h>
31#include <drm/bridge/ptn3460.h> 31#include <drm/bridge/ptn3460.h>
32 32
33#include "exynos_drm_drv.h" 33#include "exynos_drm_drv.h"
@@ -41,7 +41,7 @@ struct bridge_init {
41 struct device_node *node; 41 struct device_node *node;
42}; 42};
43 43
44static int exynos_dp_init_dp(struct exynos_dp_device *dp) 44static void exynos_dp_init_dp(struct exynos_dp_device *dp)
45{ 45{
46 exynos_dp_reset(dp); 46 exynos_dp_reset(dp);
47 47
@@ -58,8 +58,6 @@ static int exynos_dp_init_dp(struct exynos_dp_device *dp)
58 58
59 exynos_dp_init_hpd(dp); 59 exynos_dp_init_hpd(dp);
60 exynos_dp_init_aux(dp); 60 exynos_dp_init_aux(dp);
61
62 return 0;
63} 61}
64 62
65static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) 63static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
@@ -875,10 +873,24 @@ static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
875static void exynos_dp_hotplug(struct work_struct *work) 873static void exynos_dp_hotplug(struct work_struct *work)
876{ 874{
877 struct exynos_dp_device *dp; 875 struct exynos_dp_device *dp;
878 int ret;
879 876
880 dp = container_of(work, struct exynos_dp_device, hotplug_work); 877 dp = container_of(work, struct exynos_dp_device, hotplug_work);
881 878
879 if (dp->drm_dev)
880 drm_helper_hpd_irq_event(dp->drm_dev);
881}
882
883static void exynos_dp_commit(struct exynos_drm_display *display)
884{
885 struct exynos_dp_device *dp = display->ctx;
886 int ret;
887
888 /* Keep the panel disabled while we configure video */
889 if (dp->panel) {
890 if (drm_panel_disable(dp->panel))
891 DRM_ERROR("failed to disable the panel\n");
892 }
893
882 ret = exynos_dp_detect_hpd(dp); 894 ret = exynos_dp_detect_hpd(dp);
883 if (ret) { 895 if (ret) {
884 /* Cable has been disconnected, we're done */ 896 /* Cable has been disconnected, we're done */
@@ -909,6 +921,12 @@ static void exynos_dp_hotplug(struct work_struct *work)
909 ret = exynos_dp_config_video(dp); 921 ret = exynos_dp_config_video(dp);
910 if (ret) 922 if (ret)
911 dev_err(dp->dev, "unable to config video\n"); 923 dev_err(dp->dev, "unable to config video\n");
924
925 /* Safe to enable the panel now */
926 if (dp->panel) {
927 if (drm_panel_enable(dp->panel))
928 DRM_ERROR("failed to enable the panel\n");
929 }
912} 930}
913 931
914static enum drm_connector_status exynos_dp_detect( 932static enum drm_connector_status exynos_dp_detect(
@@ -933,15 +951,18 @@ static int exynos_dp_get_modes(struct drm_connector *connector)
933 struct exynos_dp_device *dp = ctx_from_connector(connector); 951 struct exynos_dp_device *dp = ctx_from_connector(connector);
934 struct drm_display_mode *mode; 952 struct drm_display_mode *mode;
935 953
954 if (dp->panel)
955 return drm_panel_get_modes(dp->panel);
956
936 mode = drm_mode_create(connector->dev); 957 mode = drm_mode_create(connector->dev);
937 if (!mode) { 958 if (!mode) {
938 DRM_ERROR("failed to create a new display mode.\n"); 959 DRM_ERROR("failed to create a new display mode.\n");
939 return 0; 960 return 0;
940 } 961 }
941 962
942 drm_display_mode_from_videomode(&dp->panel.vm, mode); 963 drm_display_mode_from_videomode(&dp->priv.vm, mode);
943 mode->width_mm = dp->panel.width_mm; 964 mode->width_mm = dp->priv.width_mm;
944 mode->height_mm = dp->panel.height_mm; 965 mode->height_mm = dp->priv.height_mm;
945 connector->display_info.width_mm = mode->width_mm; 966 connector->display_info.width_mm = mode->width_mm;
946 connector->display_info.height_mm = mode->height_mm; 967 connector->display_info.height_mm = mode->height_mm;
947 968
@@ -1018,10 +1039,13 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
1018 } 1039 }
1019 1040
1020 drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs); 1041 drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs);
1021 drm_sysfs_connector_add(connector); 1042 drm_connector_register(connector);
1022 drm_mode_connector_attach_encoder(connector, encoder); 1043 drm_mode_connector_attach_encoder(connector, encoder);
1023 1044
1024 return 0; 1045 if (dp->panel)
1046 ret = drm_panel_attach(dp->panel, &dp->connector);
1047
1048 return ret;
1025} 1049}
1026 1050
1027static void exynos_dp_phy_init(struct exynos_dp_device *dp) 1051static void exynos_dp_phy_init(struct exynos_dp_device *dp)
@@ -1050,26 +1074,50 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
1050 } 1074 }
1051} 1075}
1052 1076
1053static void exynos_dp_poweron(struct exynos_dp_device *dp) 1077static void exynos_dp_poweron(struct exynos_drm_display *display)
1054{ 1078{
1079 struct exynos_dp_device *dp = display->ctx;
1080
1055 if (dp->dpms_mode == DRM_MODE_DPMS_ON) 1081 if (dp->dpms_mode == DRM_MODE_DPMS_ON)
1056 return; 1082 return;
1057 1083
1084 if (dp->panel) {
1085 if (drm_panel_prepare(dp->panel)) {
1086 DRM_ERROR("failed to setup the panel\n");
1087 return;
1088 }
1089 }
1090
1058 clk_prepare_enable(dp->clock); 1091 clk_prepare_enable(dp->clock);
1059 exynos_dp_phy_init(dp); 1092 exynos_dp_phy_init(dp);
1060 exynos_dp_init_dp(dp); 1093 exynos_dp_init_dp(dp);
1061 enable_irq(dp->irq); 1094 enable_irq(dp->irq);
1095 exynos_dp_commit(display);
1062} 1096}
1063 1097
1064static void exynos_dp_poweroff(struct exynos_dp_device *dp) 1098static void exynos_dp_poweroff(struct exynos_drm_display *display)
1065{ 1099{
1100 struct exynos_dp_device *dp = display->ctx;
1101
1066 if (dp->dpms_mode != DRM_MODE_DPMS_ON) 1102 if (dp->dpms_mode != DRM_MODE_DPMS_ON)
1067 return; 1103 return;
1068 1104
1105 if (dp->panel) {
1106 if (drm_panel_disable(dp->panel)) {
1107 DRM_ERROR("failed to disable the panel\n");
1108 return;
1109 }
1110 }
1111
1069 disable_irq(dp->irq); 1112 disable_irq(dp->irq);
1070 flush_work(&dp->hotplug_work); 1113 flush_work(&dp->hotplug_work);
1071 exynos_dp_phy_exit(dp); 1114 exynos_dp_phy_exit(dp);
1072 clk_disable_unprepare(dp->clock); 1115 clk_disable_unprepare(dp->clock);
1116
1117 if (dp->panel) {
1118 if (drm_panel_unprepare(dp->panel))
1119 DRM_ERROR("failed to turnoff the panel\n");
1120 }
1073} 1121}
1074 1122
1075static void exynos_dp_dpms(struct exynos_drm_display *display, int mode) 1123static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
@@ -1078,12 +1126,12 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
1078 1126
1079 switch (mode) { 1127 switch (mode) {
1080 case DRM_MODE_DPMS_ON: 1128 case DRM_MODE_DPMS_ON:
1081 exynos_dp_poweron(dp); 1129 exynos_dp_poweron(display);
1082 break; 1130 break;
1083 case DRM_MODE_DPMS_STANDBY: 1131 case DRM_MODE_DPMS_STANDBY:
1084 case DRM_MODE_DPMS_SUSPEND: 1132 case DRM_MODE_DPMS_SUSPEND:
1085 case DRM_MODE_DPMS_OFF: 1133 case DRM_MODE_DPMS_OFF:
1086 exynos_dp_poweroff(dp); 1134 exynos_dp_poweroff(display);
1087 break; 1135 break;
1088 default: 1136 default:
1089 break; 1137 break;
@@ -1094,6 +1142,7 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
1094static struct exynos_drm_display_ops exynos_dp_display_ops = { 1142static struct exynos_drm_display_ops exynos_dp_display_ops = {
1095 .create_connector = exynos_dp_create_connector, 1143 .create_connector = exynos_dp_create_connector,
1096 .dpms = exynos_dp_dpms, 1144 .dpms = exynos_dp_dpms,
1145 .commit = exynos_dp_commit,
1097}; 1146};
1098 1147
1099static struct exynos_drm_display exynos_dp_display = { 1148static struct exynos_drm_display exynos_dp_display = {
@@ -1201,7 +1250,7 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
1201{ 1250{
1202 int ret; 1251 int ret;
1203 1252
1204 ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm, 1253 ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm,
1205 OF_USE_NATIVE_MODE); 1254 OF_USE_NATIVE_MODE);
1206 if (ret) { 1255 if (ret) {
1207 DRM_ERROR("failed: of_get_videomode() : %d\n", ret); 1256 DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
@@ -1215,16 +1264,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1215 struct platform_device *pdev = to_platform_device(dev); 1264 struct platform_device *pdev = to_platform_device(dev);
1216 struct drm_device *drm_dev = data; 1265 struct drm_device *drm_dev = data;
1217 struct resource *res; 1266 struct resource *res;
1218 struct exynos_dp_device *dp; 1267 struct exynos_dp_device *dp = exynos_dp_display.ctx;
1219 unsigned int irq_flags; 1268 unsigned int irq_flags;
1220
1221 int ret = 0; 1269 int ret = 0;
1222 1270
1223 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
1224 GFP_KERNEL);
1225 if (!dp)
1226 return -ENOMEM;
1227
1228 dp->dev = &pdev->dev; 1271 dp->dev = &pdev->dev;
1229 dp->dpms_mode = DRM_MODE_DPMS_OFF; 1272 dp->dpms_mode = DRM_MODE_DPMS_OFF;
1230 1273
@@ -1236,9 +1279,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1236 if (ret) 1279 if (ret)
1237 return ret; 1280 return ret;
1238 1281
1239 ret = exynos_dp_dt_parse_panel(dp); 1282 if (!dp->panel) {
1240 if (ret) 1283 ret = exynos_dp_dt_parse_panel(dp);
1241 return ret; 1284 if (ret)
1285 return ret;
1286 }
1242 1287
1243 dp->clock = devm_clk_get(&pdev->dev, "dp"); 1288 dp->clock = devm_clk_get(&pdev->dev, "dp");
1244 if (IS_ERR(dp->clock)) { 1289 if (IS_ERR(dp->clock)) {
@@ -1298,7 +1343,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1298 disable_irq(dp->irq); 1343 disable_irq(dp->irq);
1299 1344
1300 dp->drm_dev = drm_dev; 1345 dp->drm_dev = drm_dev;
1301 exynos_dp_display.ctx = dp;
1302 1346
1303 platform_set_drvdata(pdev, &exynos_dp_display); 1347 platform_set_drvdata(pdev, &exynos_dp_display);
1304 1348
@@ -1325,6 +1369,9 @@ static const struct component_ops exynos_dp_ops = {
1325 1369
1326static int exynos_dp_probe(struct platform_device *pdev) 1370static int exynos_dp_probe(struct platform_device *pdev)
1327{ 1371{
1372 struct device *dev = &pdev->dev;
1373 struct device_node *panel_node;
1374 struct exynos_dp_device *dp;
1328 int ret; 1375 int ret;
1329 1376
1330 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR, 1377 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
@@ -1332,6 +1379,21 @@ static int exynos_dp_probe(struct platform_device *pdev)
1332 if (ret) 1379 if (ret)
1333 return ret; 1380 return ret;
1334 1381
1382 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
1383 GFP_KERNEL);
1384 if (!dp)
1385 return -ENOMEM;
1386
1387 panel_node = of_parse_phandle(dev->of_node, "panel", 0);
1388 if (panel_node) {
1389 dp->panel = of_drm_find_panel(panel_node);
1390 of_node_put(panel_node);
1391 if (!dp->panel)
1392 return -EPROBE_DEFER;
1393 }
1394
1395 exynos_dp_display.ctx = dp;
1396
1335 ret = component_add(&pdev->dev, &exynos_dp_ops); 1397 ret = component_add(&pdev->dev, &exynos_dp_ops);
1336 if (ret) 1398 if (ret)
1337 exynos_drm_component_del(&pdev->dev, 1399 exynos_drm_component_del(&pdev->dev,
@@ -1376,6 +1438,7 @@ static const struct of_device_id exynos_dp_match[] = {
1376 { .compatible = "samsung,exynos5-dp" }, 1438 { .compatible = "samsung,exynos5-dp" },
1377 {}, 1439 {},
1378}; 1440};
1441MODULE_DEVICE_TABLE(of, exynos_dp_match);
1379 1442
1380struct platform_driver dp_driver = { 1443struct platform_driver dp_driver = {
1381 .probe = exynos_dp_probe, 1444 .probe = exynos_dp_probe,
@@ -1390,4 +1453,4 @@ struct platform_driver dp_driver = {
1390 1453
1391MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); 1454MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
1392MODULE_DESCRIPTION("Samsung SoC DP Driver"); 1455MODULE_DESCRIPTION("Samsung SoC DP Driver");
1393MODULE_LICENSE("GPL"); 1456MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 02cc4f9ab903..a1aee6931bd7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -149,6 +149,7 @@ struct exynos_dp_device {
149 struct drm_device *drm_dev; 149 struct drm_device *drm_dev;
150 struct drm_connector connector; 150 struct drm_connector connector;
151 struct drm_encoder *encoder; 151 struct drm_encoder *encoder;
152 struct drm_panel *panel;
152 struct clk *clock; 153 struct clk *clock;
153 unsigned int irq; 154 unsigned int irq;
154 void __iomem *reg_base; 155 void __iomem *reg_base;
@@ -162,7 +163,7 @@ struct exynos_dp_device {
162 int dpms_mode; 163 int dpms_mode;
163 int hpd_gpio; 164 int hpd_gpio;
164 165
165 struct exynos_drm_panel_info panel; 166 struct exynos_drm_panel_info priv;
166}; 167};
167 168
168/* exynos_dp_reg.c */ 169/* exynos_dp_reg.c */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 9a16dbe121d1..ba9b3d5ed672 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -117,20 +117,7 @@ static struct drm_encoder *exynos_drm_best_encoder(
117 struct drm_device *dev = connector->dev; 117 struct drm_device *dev = connector->dev;
118 struct exynos_drm_connector *exynos_connector = 118 struct exynos_drm_connector *exynos_connector =
119 to_exynos_connector(connector); 119 to_exynos_connector(connector);
120 struct drm_mode_object *obj; 120 return drm_encoder_find(dev, exynos_connector->encoder_id);
121 struct drm_encoder *encoder;
122
123 obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
124 DRM_MODE_OBJECT_ENCODER);
125 if (!obj) {
126 DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
127 exynos_connector->encoder_id);
128 return NULL;
129 }
130
131 encoder = obj_to_encoder(obj);
132
133 return encoder;
134} 121}
135 122
136static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { 123static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -185,7 +172,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
185 struct exynos_drm_connector *exynos_connector = 172 struct exynos_drm_connector *exynos_connector =
186 to_exynos_connector(connector); 173 to_exynos_connector(connector);
187 174
188 drm_sysfs_connector_remove(connector); 175 drm_connector_unregister(connector);
189 drm_connector_cleanup(connector); 176 drm_connector_cleanup(connector);
190 kfree(exynos_connector); 177 kfree(exynos_connector);
191} 178}
@@ -230,7 +217,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
230 drm_connector_init(dev, connector, &exynos_connector_funcs, type); 217 drm_connector_init(dev, connector, &exynos_connector_funcs, type);
231 drm_connector_helper_add(connector, &exynos_connector_helper_funcs); 218 drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
232 219
233 err = drm_sysfs_connector_add(connector); 220 err = drm_connector_register(connector);
234 if (err) 221 if (err)
235 goto err_connector; 222 goto err_connector;
236 223
@@ -250,7 +237,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
250 return connector; 237 return connector;
251 238
252err_sysfs: 239err_sysfs:
253 drm_sysfs_connector_remove(connector); 240 drm_connector_unregister(connector);
254err_connector: 241err_connector:
255 drm_connector_cleanup(connector); 242 drm_connector_cleanup(connector);
256 kfree(exynos_connector); 243 kfree(exynos_connector);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 95c9435d0266..b68e58f78cd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -69,8 +69,10 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
69 69
70 if (mode > DRM_MODE_DPMS_ON) { 70 if (mode > DRM_MODE_DPMS_ON) {
71 /* wait for the completion of page flip. */ 71 /* wait for the completion of page flip. */
72 wait_event(exynos_crtc->pending_flip_queue, 72 if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
73 atomic_read(&exynos_crtc->pending_flip) == 0); 73 !atomic_read(&exynos_crtc->pending_flip),
74 HZ/20))
75 atomic_set(&exynos_crtc->pending_flip, 0);
74 drm_vblank_off(crtc->dev, exynos_crtc->pipe); 76 drm_vblank_off(crtc->dev, exynos_crtc->pipe);
75 } 77 }
76 78
@@ -259,6 +261,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
259 spin_lock_irq(&dev->event_lock); 261 spin_lock_irq(&dev->event_lock);
260 drm_vblank_put(dev, exynos_crtc->pipe); 262 drm_vblank_put(dev, exynos_crtc->pipe);
261 list_del(&event->base.link); 263 list_del(&event->base.link);
264 atomic_set(&exynos_crtc->pending_flip, 0);
262 spin_unlock_irq(&dev->event_lock); 265 spin_unlock_irq(&dev->event_lock);
263 266
264 goto out; 267 goto out;
@@ -508,3 +511,11 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
508 511
509 return -EPERM; 512 return -EPERM;
510} 513}
514
515void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
516{
517 struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
518
519 if (manager->ops->te_handler)
520 manager->ops->te_handler(manager);
521}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 9f74b10a8a01..690dcddab725 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -36,4 +36,11 @@ void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
36int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, 36int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
37 unsigned int out_type); 37 unsigned int out_type);
38 38
39/*
40 * This function calls the crtc device(manager)'s te_handler() callback
41 * to trigger to transfer video image at the tearing effect synchronization
42 * signal.
43 */
44void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
45
39#endif 46#endif
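A manager that supports the tearing-effect signal wires the new callback into its ops and lets its TE interrupt path call into the crtc layer; a sketch with hypothetical names:

	static void example_te_handler(struct exynos_drm_manager *mgr)
	{
		/* trigger the pending image transfer for this manager */
	}

	static struct exynos_drm_manager_ops example_manager_ops = {
		.te_handler = example_te_handler,
	};

	/* in the TE interrupt handler, with crtc the drm_crtc this manager drives: */
	exynos_drm_crtc_te_handler(crtc);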
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 9e530f205ad2..fa08f05e3e34 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -48,7 +48,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
48 48
49static void exynos_dpi_connector_destroy(struct drm_connector *connector) 49static void exynos_dpi_connector_destroy(struct drm_connector *connector)
50{ 50{
51 drm_sysfs_connector_remove(connector); 51 drm_connector_unregister(connector);
52 drm_connector_cleanup(connector); 52 drm_connector_cleanup(connector);
53} 53}
54 54
@@ -117,7 +117,7 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
117 } 117 }
118 118
119 drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs); 119 drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
120 drm_sysfs_connector_add(connector); 120 drm_connector_register(connector);
121 drm_mode_connector_attach_encoder(connector, encoder); 121 drm_mode_connector_attach_encoder(connector, encoder);
122 122
123 return 0; 123 return 0;
@@ -125,14 +125,18 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
125 125
126static void exynos_dpi_poweron(struct exynos_dpi *ctx) 126static void exynos_dpi_poweron(struct exynos_dpi *ctx)
127{ 127{
128 if (ctx->panel) 128 if (ctx->panel) {
129 drm_panel_prepare(ctx->panel);
129 drm_panel_enable(ctx->panel); 130 drm_panel_enable(ctx->panel);
131 }
130} 132}
131 133
132static void exynos_dpi_poweroff(struct exynos_dpi *ctx) 134static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
133{ 135{
134 if (ctx->panel) 136 if (ctx->panel) {
135 drm_panel_disable(ctx->panel); 137 drm_panel_disable(ctx->panel);
138 drm_panel_unprepare(ctx->panel);
139 }
136} 140}
137 141
138static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode) 142static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
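With the new hooks, the drm_panel calls bracket the video path symmetrically: prepare() before the panel sees video, enable() once video is up, and the reverse order on the way down. A sketch of the pattern (error checks elided):

	/* power on */
	drm_panel_prepare(panel);   /* power up, run the init sequence */
	/* ... enable the display pipeline ... */
	drm_panel_enable(panel);    /* e.g. switch the backlight on */

	/* power off */
	drm_panel_disable(panel);   /* e.g. switch the backlight off */
	/* ... disable the display pipeline ... */
	drm_panel_unprepare(panel); /* power down */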
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ab7d182063c3..0d74e9b99c4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -39,8 +39,6 @@
39#define DRIVER_MAJOR 1 39#define DRIVER_MAJOR 1
40#define DRIVER_MINOR 0 40#define DRIVER_MINOR 0
41 41
42#define VBLANK_OFF_DELAY 50000
43
44static struct platform_device *exynos_drm_pdev; 42static struct platform_device *exynos_drm_pdev;
45 43
46static DEFINE_MUTEX(drm_component_lock); 44static DEFINE_MUTEX(drm_component_lock);
@@ -103,8 +101,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
103 /* setup possible_clones. */ 101 /* setup possible_clones. */
104 exynos_drm_encoder_setup(dev); 102 exynos_drm_encoder_setup(dev);
105 103
106 drm_vblank_offdelay = VBLANK_OFF_DELAY;
107
108 platform_set_drvdata(dev->platformdev, dev); 104 platform_set_drvdata(dev->platformdev, dev);
109 105
110 /* Try to bind all sub drivers. */ 106 /* Try to bind all sub drivers. */
@@ -362,7 +358,7 @@ static int exynos_drm_sys_suspend(struct device *dev)
362 struct drm_device *drm_dev = dev_get_drvdata(dev); 358 struct drm_device *drm_dev = dev_get_drvdata(dev);
363 pm_message_t message; 359 pm_message_t message;
364 360
365 if (pm_runtime_suspended(dev)) 361 if (pm_runtime_suspended(dev) || !drm_dev)
366 return 0; 362 return 0;
367 363
368 message.event = PM_EVENT_SUSPEND; 364 message.event = PM_EVENT_SUSPEND;
@@ -373,7 +369,7 @@ static int exynos_drm_sys_resume(struct device *dev)
373{ 369{
374 struct drm_device *drm_dev = dev_get_drvdata(dev); 370 struct drm_device *drm_dev = dev_get_drvdata(dev);
375 371
376 if (pm_runtime_suspended(dev)) 372 if (pm_runtime_suspended(dev) || !drm_dev)
377 return 0; 373 return 0;
378 374
379 return exynos_drm_resume(drm_dev); 375 return exynos_drm_resume(drm_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 06cde4506278..69a6fa397d75 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -40,8 +40,6 @@ struct drm_device;
40struct exynos_drm_overlay; 40struct exynos_drm_overlay;
41struct drm_connector; 41struct drm_connector;
42 42
43extern unsigned int drm_vblank_offdelay;
44
45/* This enumerates device type. */ 43/* This enumerates device type. */
46enum exynos_drm_device_type { 44enum exynos_drm_device_type {
47 EXYNOS_DEVICE_TYPE_NONE, 45 EXYNOS_DEVICE_TYPE_NONE,
@@ -188,6 +186,8 @@ struct exynos_drm_display {
188 * @win_commit: apply hardware specific overlay data to registers. 186 * @win_commit: apply hardware specific overlay data to registers.
189 * @win_enable: enable hardware specific overlay. 187 * @win_enable: enable hardware specific overlay.
190 * @win_disable: disable hardware specific overlay. 188 * @win_disable: disable hardware specific overlay.
189 * @te_handler: trigger to transfer video image at the tearing effect
190 * synchronization signal if there is a page flip request.
191 */ 191 */
192struct exynos_drm_manager; 192struct exynos_drm_manager;
193struct exynos_drm_manager_ops { 193struct exynos_drm_manager_ops {
@@ -206,6 +206,7 @@ struct exynos_drm_manager_ops {
206 void (*win_commit)(struct exynos_drm_manager *mgr, int zpos); 206 void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
207 void (*win_enable)(struct exynos_drm_manager *mgr, int zpos); 207 void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
208 void (*win_disable)(struct exynos_drm_manager *mgr, int zpos); 208 void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
209 void (*te_handler)(struct exynos_drm_manager *mgr);
209}; 210};
210 211
211/* 212/*
@@ -236,14 +237,9 @@ struct exynos_drm_g2d_private {
236 struct list_head userptr_list; 237 struct list_head userptr_list;
237}; 238};
238 239
239struct exynos_drm_ipp_private {
240 struct device *dev;
241 struct list_head event_list;
242};
243
244struct drm_exynos_file_private { 240struct drm_exynos_file_private {
245 struct exynos_drm_g2d_private *g2d_priv; 241 struct exynos_drm_g2d_private *g2d_priv;
246 struct exynos_drm_ipp_private *ipp_priv; 242 struct device *ipp_dev;
247 struct file *anon_filp; 243 struct file *anon_filp;
248}; 244};
249 245
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6302aa64f6c1..442aa2d00132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -16,7 +16,10 @@
16#include <drm/drm_panel.h> 16#include <drm/drm_panel.h>
17 17
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/gpio/consumer.h>
19#include <linux/irq.h> 20#include <linux/irq.h>
21#include <linux/of_device.h>
22#include <linux/of_gpio.h>
20#include <linux/phy/phy.h> 23#include <linux/phy/phy.h>
21#include <linux/regulator/consumer.h> 24#include <linux/regulator/consumer.h>
22#include <linux/component.h> 25#include <linux/component.h>
@@ -24,6 +27,7 @@
24#include <video/mipi_display.h> 27#include <video/mipi_display.h>
25#include <video/videomode.h> 28#include <video/videomode.h>
26 29
30#include "exynos_drm_crtc.h"
27#include "exynos_drm_drv.h" 31#include "exynos_drm_drv.h"
28 32
29/* returns true iff both arguments logically differs */ 33/* returns true iff both arguments logically differs */
@@ -54,9 +58,12 @@
54 58
55/* FIFO memory AC characteristic register */ 59/* FIFO memory AC characteristic register */
56#define DSIM_PLLCTRL_REG 0x4c /* PLL control register */ 60#define DSIM_PLLCTRL_REG 0x4c /* PLL control register */
57#define DSIM_PLLTMR_REG 0x50 /* PLL timer register */
58#define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */ 61#define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */
59#define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */ 62#define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */
63#define DSIM_PHYCTRL_REG 0x5c
64#define DSIM_PHYTIMING_REG 0x64
65#define DSIM_PHYTIMING1_REG 0x68
66#define DSIM_PHYTIMING2_REG 0x6c
60 67
61/* DSIM_STATUS */ 68/* DSIM_STATUS */
62#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0) 69#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
@@ -200,6 +207,24 @@
200#define DSIM_PLL_M(x) ((x) << 4) 207#define DSIM_PLL_M(x) ((x) << 4)
201#define DSIM_PLL_S(x) ((x) << 1) 208#define DSIM_PLL_S(x) ((x) << 1)
202 209
210/* DSIM_PHYCTRL */
211#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
212
213/* DSIM_PHYTIMING */
214#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
215#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
216
217/* DSIM_PHYTIMING1 */
218#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
219#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
220#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
221#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
222
223/* DSIM_PHYTIMING2 */
224#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
225#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
226#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
227
203#define DSI_MAX_BUS_WIDTH 4 228#define DSI_MAX_BUS_WIDTH 4
204#define DSI_NUM_VIRTUAL_CHANNELS 4 229#define DSI_NUM_VIRTUAL_CHANNELS 4
205#define DSI_TX_FIFO_SIZE 2048 230#define DSI_TX_FIFO_SIZE 2048
@@ -233,6 +258,12 @@ struct exynos_dsi_transfer {
233#define DSIM_STATE_INITIALIZED BIT(1) 258#define DSIM_STATE_INITIALIZED BIT(1)
234#define DSIM_STATE_CMD_LPM BIT(2) 259#define DSIM_STATE_CMD_LPM BIT(2)
235 260
261struct exynos_dsi_driver_data {
262 unsigned int plltmr_reg;
263
264 unsigned int has_freqband:1;
265};
266
236struct exynos_dsi { 267struct exynos_dsi {
237 struct mipi_dsi_host dsi_host; 268 struct mipi_dsi_host dsi_host;
238 struct drm_connector connector; 269 struct drm_connector connector;
@@ -247,6 +278,7 @@ struct exynos_dsi {
247 struct clk *bus_clk; 278 struct clk *bus_clk;
248 struct regulator_bulk_data supplies[2]; 279 struct regulator_bulk_data supplies[2];
249 int irq; 280 int irq;
281 int te_gpio;
250 282
251 u32 pll_clk_rate; 283 u32 pll_clk_rate;
252 u32 burst_clk_rate; 284 u32 burst_clk_rate;
@@ -262,11 +294,39 @@ struct exynos_dsi {
262 294
263 spinlock_t transfer_lock; /* protects transfer_list */ 295 spinlock_t transfer_lock; /* protects transfer_list */
264 struct list_head transfer_list; 296 struct list_head transfer_list;
297
298 struct exynos_dsi_driver_data *driver_data;
265}; 299};
266 300
267#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) 301#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
268#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector) 302#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
269 303
304static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
305 .plltmr_reg = 0x50,
306 .has_freqband = 1,
307};
308
309static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
310 .plltmr_reg = 0x58,
311};
312
313static struct of_device_id exynos_dsi_of_match[] = {
314 { .compatible = "samsung,exynos4210-mipi-dsi",
315 .data = &exynos4_dsi_driver_data },
316 { .compatible = "samsung,exynos5410-mipi-dsi",
317 .data = &exynos5_dsi_driver_data },
318 { }
319};
320
321static inline struct exynos_dsi_driver_data *exynos_dsi_get_driver_data(
322 struct platform_device *pdev)
323{
324 const struct of_device_id *of_id =
325 of_match_device(exynos_dsi_of_match, &pdev->dev);
326
327 return (struct exynos_dsi_driver_data *)of_id->data;
328}
329
270static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi) 330static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
271{ 331{
272 if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300))) 332 if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
@@ -340,14 +400,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
340static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi, 400static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
341 unsigned long freq) 401 unsigned long freq)
342{ 402{
343 static const unsigned long freq_bands[] = { 403 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
344 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
345 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
346 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
347 770 * MHZ, 870 * MHZ, 950 * MHZ,
348 };
349 unsigned long fin, fout; 404 unsigned long fin, fout;
350 int timeout, band; 405 int timeout;
351 u8 p, s; 406 u8 p, s;
352 u16 m; 407 u16 m;
353 u32 reg; 408 u32 reg;
@@ -368,18 +423,30 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
368 "failed to find PLL PMS for requested frequency\n"); 423 "failed to find PLL PMS for requested frequency\n");
369 return -EFAULT; 424 return -EFAULT;
370 } 425 }
426 dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
371 427
372 for (band = 0; band < ARRAY_SIZE(freq_bands); ++band) 428 writel(500, dsi->reg_base + driver_data->plltmr_reg);
373 if (fout < freq_bands[band]) 429
374 break; 430 reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
375 431
376 dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout, 432 if (driver_data->has_freqband) {
377 p, m, s, band); 433 static const unsigned long freq_bands[] = {
434 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
435 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
436 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
437 770 * MHZ, 870 * MHZ, 950 * MHZ,
438 };
439 int band;
378 440
379 writel(500, dsi->reg_base + DSIM_PLLTMR_REG); 441 for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
442 if (fout < freq_bands[band])
443 break;
444
445 dev_dbg(dsi->dev, "band %d\n", band);
446
447 reg |= DSIM_FREQ_BAND(band);
448 }
380 449
381 reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN
382 | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
383 writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG); 450 writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
384 451
385 timeout = 1000; 452 timeout = 1000;
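
For reference, on exynos4-class parts (has_freqband set) the band index is simply the first table entry the PLL output frequency falls below. A minimal standalone sketch of that lookup, with the table values copied from the hunk above (userspace illustration, not driver code):

#include <stdio.h>

#define MHZ	1000000UL

/* frequency band table copied from exynos_dsi_set_pll() above */
static const unsigned long freq_bands[] = {
	100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
	270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
	510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
	770 * MHZ, 870 * MHZ, 950 * MHZ,
};

/* first band whose upper bound exceeds fout, as in the driver loop */
static int dsi_freq_band(unsigned long fout)
{
	unsigned int band;

	for (band = 0; band < sizeof(freq_bands) / sizeof(freq_bands[0]); ++band)
		if (fout < freq_bands[band])
			break;
	return band;
}

int main(void)
{
	/* a 500 MHz PLL output falls into band 8 (below 510 MHz) */
	printf("band for 500 MHz: %d\n", dsi_freq_band(500 * MHZ));
	return 0;
}
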
@@ -433,6 +500,59 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
433 return 0; 500 return 0;
434} 501}
435 502
503static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
504{
505 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
506 u32 reg;
507
508 if (driver_data->has_freqband)
509 return;
510
511 /* B D-PHY: D-PHY Master & Slave Analog Block control */
512 reg = DSIM_PHYCTRL_ULPS_EXIT(0x0af);
513 writel(reg, dsi->reg_base + DSIM_PHYCTRL_REG);
514
515 /*
516 * T LPX: Transmitted length of any Low-Power state period
517 * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
518 * burst
519 */
520 reg = DSIM_PHYTIMING_LPX(0x06) | DSIM_PHYTIMING_HS_EXIT(0x0b);
521 writel(reg, dsi->reg_base + DSIM_PHYTIMING_REG);
522
523 /*
524 * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
525 * Line state immediately before the HS-0 Line state starting the
526 * HS transmission
527 * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
528 * transmitting the Clock.
529 * T CLK-POST: Time that the transmitter continues to send HS clock
530 * after the last associated Data Lane has transitioned to LP Mode
531 * Interval is defined as the period from the end of T HS-TRAIL to
532 * the beginning of T CLK-TRAIL
533 * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
534 * the last payload clock bit of a HS transmission burst
535 */
536 reg = DSIM_PHYTIMING1_CLK_PREPARE(0x07) |
537 DSIM_PHYTIMING1_CLK_ZERO(0x27) |
538 DSIM_PHYTIMING1_CLK_POST(0x0d) |
539 DSIM_PHYTIMING1_CLK_TRAIL(0x08);
540 writel(reg, dsi->reg_base + DSIM_PHYTIMING1_REG);
541
542 /*
543 * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
544 * Line state immediately before the HS-0 Line state starting the
545 * HS transmission
546 * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
547 * transmitting the Sync sequence.
548 * T HS-TRAIL: Time that the transmitter drives the flipped differential
549 * state after last payload data bit of a HS transmission burst
550 */
551 reg = DSIM_PHYTIMING2_HS_PREPARE(0x09) | DSIM_PHYTIMING2_HS_ZERO(0x0d) |
552 DSIM_PHYTIMING2_HS_TRAIL(0x0b);
553 writel(reg, dsi->reg_base + DSIM_PHYTIMING2_REG);
554}
555
436static void exynos_dsi_disable_clock(struct exynos_dsi *dsi) 556static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
437{ 557{
438 u32 reg; 558 u32 reg;
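
The PHY timing registers added above pack byte-wide fields at fixed shifts, so each register value is plain bit arithmetic. As a worked example, the DSIM_PHYTIMING1 value that exynos_dsi_set_phy_ctrl() writes can be reproduced in isolation (macros copied from the register block earlier in this diff):

#include <stdio.h>
#include <stdint.h>

/* field macros copied from the DSIM_PHYTIMING1 block above */
#define DSIM_PHYTIMING1_CLK_PREPARE(x)	((x) << 24)
#define DSIM_PHYTIMING1_CLK_ZERO(x)	((x) << 16)
#define DSIM_PHYTIMING1_CLK_POST(x)	((x) << 8)
#define DSIM_PHYTIMING1_CLK_TRAIL(x)	((x) << 0)

int main(void)
{
	uint32_t reg = DSIM_PHYTIMING1_CLK_PREPARE(0x07) |
		       DSIM_PHYTIMING1_CLK_ZERO(0x27) |
		       DSIM_PHYTIMING1_CLK_POST(0x0d) |
		       DSIM_PHYTIMING1_CLK_TRAIL(0x08);

	/* prints DSIM_PHYTIMING1 = 0x07270d08 */
	printf("DSIM_PHYTIMING1 = 0x%08x\n", reg);
	return 0;
}
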
@@ -468,13 +588,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
468 /* DSI configuration */ 588 /* DSI configuration */
469 reg = 0; 589 reg = 0;
470 590
591 /*
592 * The first bit of mode_flags selects the display configuration:
593 * if MIPI_DSI_MODE_VIDEO is set, the link operates in video mode;
594 * otherwise it operates in command mode.
595 */
471 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { 596 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
472 reg |= DSIM_VIDEO_MODE; 597 reg |= DSIM_VIDEO_MODE;
473 598
599 /*
600 * The user manual states that the following bits are ignored in
601 * command mode.
602 */
474 if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH)) 603 if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
475 reg |= DSIM_MFLUSH_VS; 604 reg |= DSIM_MFLUSH_VS;
476 if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
477 reg |= DSIM_EOT_DISABLE;
478 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) 605 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
479 reg |= DSIM_SYNC_INFORM; 606 reg |= DSIM_SYNC_INFORM;
480 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) 607 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
@@ -491,6 +618,9 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
491 reg |= DSIM_HSA_MODE; 618 reg |= DSIM_HSA_MODE;
492 } 619 }
493 620
621 if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
622 reg |= DSIM_EOT_DISABLE;
623
494 switch (dsi->format) { 624 switch (dsi->format) {
495 case MIPI_DSI_FMT_RGB888: 625 case MIPI_DSI_FMT_RGB888:
496 reg |= DSIM_MAIN_PIX_FORMAT_RGB888; 626 reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
@@ -944,17 +1074,90 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
944 return IRQ_HANDLED; 1074 return IRQ_HANDLED;
945} 1075}
946 1076
1077static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
1078{
1079 struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
1080 struct drm_encoder *encoder = dsi->encoder;
1081
1082 if (dsi->state & DSIM_STATE_ENABLED)
1083 exynos_drm_crtc_te_handler(encoder->crtc);
1084
1085 return IRQ_HANDLED;
1086}
1087
1088static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
1089{
1090 enable_irq(dsi->irq);
1091
1092 if (gpio_is_valid(dsi->te_gpio))
1093 enable_irq(gpio_to_irq(dsi->te_gpio));
1094}
1095
1096static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
1097{
1098 if (gpio_is_valid(dsi->te_gpio))
1099 disable_irq(gpio_to_irq(dsi->te_gpio));
1100
1101 disable_irq(dsi->irq);
1102}
1103
947static int exynos_dsi_init(struct exynos_dsi *dsi) 1104static int exynos_dsi_init(struct exynos_dsi *dsi)
948{ 1105{
949 exynos_dsi_enable_clock(dsi);
950 exynos_dsi_reset(dsi); 1106 exynos_dsi_reset(dsi);
951 enable_irq(dsi->irq); 1107 exynos_dsi_enable_irq(dsi);
1108 exynos_dsi_enable_clock(dsi);
952 exynos_dsi_wait_for_reset(dsi); 1109 exynos_dsi_wait_for_reset(dsi);
1110 exynos_dsi_set_phy_ctrl(dsi);
953 exynos_dsi_init_link(dsi); 1111 exynos_dsi_init_link(dsi);
954 1112
955 return 0; 1113 return 0;
956} 1114}
957 1115
1116static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
1117{
1118 int ret;
1119
1120 dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
1121 if (!gpio_is_valid(dsi->te_gpio)) {
1122 dev_err(dsi->dev, "no te-gpios specified\n");
1123 ret = dsi->te_gpio;
1124 goto out;
1125 }
1126
1127 ret = gpio_request_one(dsi->te_gpio, GPIOF_IN, "te_gpio");
1128 if (ret) {
1129 dev_err(dsi->dev, "gpio request failed with %d\n", ret);
1130 goto out;
1131 }
1132
1133 /*
1134 * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because the panel
1135 * driver calls drm_panel_init() first and mipi_dsi_attach() later in its
1136 * probe(), so te_gpio is still invalid when exynos_dsi_enable_irq() is
1137 * called via drm_panel_init(), before the panel is attached.
1138 */
1139 ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio),
1140 exynos_dsi_te_irq_handler, NULL,
1141 IRQF_TRIGGER_RISING, "TE", dsi);
1142 if (ret) {
1143 dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
1144 gpio_free(dsi->te_gpio);
1145 goto out;
1146 }
1147
1148out:
1149 return ret;
1150}
1151
1152static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
1153{
1154 if (gpio_is_valid(dsi->te_gpio)) {
1155 free_irq(gpio_to_irq(dsi->te_gpio), dsi);
1156 gpio_free(dsi->te_gpio);
1157 dsi->te_gpio = -ENOENT;
1158 }
1159}
1160
958static int exynos_dsi_host_attach(struct mipi_dsi_host *host, 1161static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
959 struct mipi_dsi_device *device) 1162 struct mipi_dsi_device *device)
960{ 1163{
@@ -968,6 +1171,19 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
968 if (dsi->connector.dev) 1171 if (dsi->connector.dev)
969 drm_helper_hpd_irq_event(dsi->connector.dev); 1172 drm_helper_hpd_irq_event(dsi->connector.dev);
970 1173
1174 /*
1175 * This is a temporary solution and should be replaced by a more generic one.
1176 *
1177 * If the attached panel device operates in command mode, the DSI
1178 * driver must register a TE interrupt handler.
1179 */
1180 if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
1181 int ret = exynos_dsi_register_te_irq(dsi);
1182
1183 if (ret)
1184 return ret;
1185 }
1186
971 return 0; 1187 return 0;
972} 1188}
973 1189
@@ -976,6 +1192,8 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
976{ 1192{
977 struct exynos_dsi *dsi = host_to_dsi(host); 1193 struct exynos_dsi *dsi = host_to_dsi(host);
978 1194
1195 exynos_dsi_unregister_te_irq(dsi);
1196
979 dsi->panel_node = NULL; 1197 dsi->panel_node = NULL;
980 1198
981 if (dsi->connector.dev) 1199 if (dsi->connector.dev)
@@ -1089,7 +1307,7 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
1089 1307
1090 exynos_dsi_disable_clock(dsi); 1308 exynos_dsi_disable_clock(dsi);
1091 1309
1092 disable_irq(dsi->irq); 1310 exynos_dsi_disable_irq(dsi);
1093 } 1311 }
1094 1312
1095 dsi->state &= ~DSIM_STATE_CMD_LPM; 1313 dsi->state &= ~DSIM_STATE_CMD_LPM;
@@ -1115,7 +1333,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
1115 if (ret < 0) 1333 if (ret < 0)
1116 return ret; 1334 return ret;
1117 1335
1118 ret = drm_panel_enable(dsi->panel); 1336 ret = drm_panel_prepare(dsi->panel);
1119 if (ret < 0) { 1337 if (ret < 0) {
1120 exynos_dsi_poweroff(dsi); 1338 exynos_dsi_poweroff(dsi);
1121 return ret; 1339 return ret;
@@ -1124,6 +1342,14 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
1124 exynos_dsi_set_display_mode(dsi); 1342 exynos_dsi_set_display_mode(dsi);
1125 exynos_dsi_set_display_enable(dsi, true); 1343 exynos_dsi_set_display_enable(dsi, true);
1126 1344
1345 ret = drm_panel_enable(dsi->panel);
1346 if (ret < 0) {
1347 exynos_dsi_set_display_enable(dsi, false);
1348 drm_panel_unprepare(dsi->panel);
1349 exynos_dsi_poweroff(dsi);
1350 return ret;
1351 }
1352
1127 dsi->state |= DSIM_STATE_ENABLED; 1353 dsi->state |= DSIM_STATE_ENABLED;
1128 1354
1129 return 0; 1355 return 0;
@@ -1134,8 +1360,9 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
1134 if (!(dsi->state & DSIM_STATE_ENABLED)) 1360 if (!(dsi->state & DSIM_STATE_ENABLED))
1135 return; 1361 return;
1136 1362
1137 exynos_dsi_set_display_enable(dsi, false);
1138 drm_panel_disable(dsi->panel); 1363 drm_panel_disable(dsi->panel);
1364 exynos_dsi_set_display_enable(dsi, false);
1365 drm_panel_unprepare(dsi->panel);
1139 exynos_dsi_poweroff(dsi); 1366 exynos_dsi_poweroff(dsi);
1140 1367
1141 dsi->state &= ~DSIM_STATE_ENABLED; 1368 dsi->state &= ~DSIM_STATE_ENABLED;
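
The two hunks above give the panel a strict, mirrored ordering: prepare before the display engine starts scanning, enable once it is running, and the reverse on the way down. A minimal userspace model of that sequence, with stubs standing in for the drm_panel and display-enable calls (illustration only; error handling elided):

#include <stdio.h>

static void panel_prepare(void)    { puts("drm_panel_prepare");    }
static void panel_enable(void)     { puts("drm_panel_enable");     }
static void panel_disable(void)    { puts("drm_panel_disable");    }
static void panel_unprepare(void)  { puts("drm_panel_unprepare");  }
static void display_enable(int on) { printf("set_display_enable(%d)\n", on); }

int main(void)
{
	/* enable path: prepare the panel before video starts flowing */
	panel_prepare();
	display_enable(1);
	panel_enable();

	/* disable path: the exact mirror of the enable path */
	panel_disable();
	display_enable(0);
	panel_unprepare();
	return 0;
}
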
@@ -1246,7 +1473,7 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
1246 } 1473 }
1247 1474
1248 drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs); 1475 drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
1249 drm_sysfs_connector_add(connector); 1476 drm_connector_register(connector);
1250 drm_mode_connector_attach_encoder(connector, encoder); 1477 drm_mode_connector_attach_encoder(connector, encoder);
1251 1478
1252 return 0; 1479 return 0;
@@ -1278,6 +1505,7 @@ static struct exynos_drm_display exynos_dsi_display = {
1278 .type = EXYNOS_DISPLAY_TYPE_LCD, 1505 .type = EXYNOS_DISPLAY_TYPE_LCD,
1279 .ops = &exynos_dsi_display_ops, 1506 .ops = &exynos_dsi_display_ops,
1280}; 1507};
1508MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
1281 1509
1282/* of_* functions will be removed after merge of of_graph patches */ 1510/* of_* functions will be removed after merge of of_graph patches */
1283static struct device_node * 1511static struct device_node *
@@ -1435,6 +1663,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1435 goto err_del_component; 1663 goto err_del_component;
1436 } 1664 }
1437 1665
1666 /* Initialize to an invalid GPIO; gpio_is_valid() checks rely on this */
1667 dsi->te_gpio = -ENOENT;
1668
1438 init_completion(&dsi->completed); 1669 init_completion(&dsi->completed);
1439 spin_lock_init(&dsi->transfer_lock); 1670 spin_lock_init(&dsi->transfer_lock);
1440 INIT_LIST_HEAD(&dsi->transfer_list); 1671 INIT_LIST_HEAD(&dsi->transfer_list);
@@ -1443,6 +1674,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1443 dsi->dsi_host.dev = &pdev->dev; 1674 dsi->dsi_host.dev = &pdev->dev;
1444 1675
1445 dsi->dev = &pdev->dev; 1676 dsi->dev = &pdev->dev;
1677 dsi->driver_data = exynos_dsi_get_driver_data(pdev);
1446 1678
1447 ret = exynos_dsi_parse_dt(dsi); 1679 ret = exynos_dsi_parse_dt(dsi);
1448 if (ret) 1680 if (ret)
@@ -1525,11 +1757,6 @@ static int exynos_dsi_remove(struct platform_device *pdev)
1525 return 0; 1757 return 0;
1526} 1758}
1527 1759
1528static struct of_device_id exynos_dsi_of_match[] = {
1529 { .compatible = "samsung,exynos4210-mipi-dsi" },
1530 { }
1531};
1532
1533struct platform_driver dsi_driver = { 1760struct platform_driver dsi_driver = {
1534 .probe = exynos_dsi_probe, 1761 .probe = exynos_dsi_probe,
1535 .remove = exynos_dsi_remove, 1762 .remove = exynos_dsi_remove,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index d771b467cf0c..32e63f60e1d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -225,7 +225,7 @@ out:
225 return ret; 225 return ret;
226} 226}
227 227
228static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { 228static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
229 .fb_probe = exynos_drm_fbdev_create, 229 .fb_probe = exynos_drm_fbdev_create,
230}; 230};
231 231
@@ -266,7 +266,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
266 return -ENOMEM; 266 return -ENOMEM;
267 267
268 private->fb_helper = helper = &fbdev->drm_fb_helper; 268 private->fb_helper = helper = &fbdev->drm_fb_helper;
269 helper->funcs = &exynos_drm_fb_helper_funcs; 269
270 drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
270 271
271 num_crtc = dev->mode_config.num_crtc; 272 num_crtc = dev->mode_config.num_crtc;
272 273
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 831dde9034c6..ec7cc9ea50df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1887,6 +1887,7 @@ static const struct of_device_id fimc_of_match[] = {
1887 { .compatible = "samsung,exynos4212-fimc" }, 1887 { .compatible = "samsung,exynos4212-fimc" },
1888 { }, 1888 { },
1889}; 1889};
1890MODULE_DEVICE_TABLE(of, fimc_of_match);
1890 1891
1891struct platform_driver fimc_driver = { 1892struct platform_driver fimc_driver = {
1892 .probe = fimc_probe, 1893 .probe = fimc_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 33161ad38201..5d09e33fef87 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -20,6 +20,8 @@
20#include <linux/of_device.h> 20#include <linux/of_device.h>
21#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
22#include <linux/component.h> 22#include <linux/component.h>
23#include <linux/mfd/syscon.h>
24#include <linux/regmap.h>
23 25
24#include <video/of_display_timing.h> 26#include <video/of_display_timing.h>
25#include <video/of_videomode.h> 27#include <video/of_videomode.h>
@@ -61,6 +63,24 @@
61/* color key value register for hardware window 1 ~ 4. */ 63/* color key value register for hardware window 1 ~ 4. */
62#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8)) 64#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
63 65
66/* I80 / RGB trigger control register */
67#define TRIGCON 0x1A4
68#define TRGMODE_I80_RGB_ENABLE_I80 (1 << 0)
69#define SWTRGCMD_I80_RGB_ENABLE (1 << 1)
70
71/* display mode change control register (not present on exynos4) */
72#define VIDOUT_CON 0x000
73#define VIDOUT_CON_F_I80_LDI0 (0x2 << 8)
74
75/* I80 interface control for main LDI register */
76#define I80IFCONFAx(x) (0x1B0 + (x) * 4)
77#define I80IFCONFBx(x) (0x1B8 + (x) * 4)
78#define LCD_CS_SETUP(x) ((x) << 16)
79#define LCD_WR_SETUP(x) ((x) << 12)
80#define LCD_WR_ACTIVE(x) ((x) << 8)
81#define LCD_WR_HOLD(x) ((x) << 4)
82#define I80IFEN_ENABLE (1 << 0)
83
64/* FIMD has totally five hardware windows. */ 84/* FIMD has totally five hardware windows. */
65#define WINDOWS_NR 5 85#define WINDOWS_NR 5
66 86
@@ -68,10 +88,14 @@
68 88
69struct fimd_driver_data { 89struct fimd_driver_data {
70 unsigned int timing_base; 90 unsigned int timing_base;
91 unsigned int lcdblk_offset;
92 unsigned int lcdblk_vt_shift;
93 unsigned int lcdblk_bypass_shift;
71 94
72 unsigned int has_shadowcon:1; 95 unsigned int has_shadowcon:1;
73 unsigned int has_clksel:1; 96 unsigned int has_clksel:1;
74 unsigned int has_limited_fmt:1; 97 unsigned int has_limited_fmt:1;
98 unsigned int has_vidoutcon:1;
75}; 99};
76 100
77static struct fimd_driver_data s3c64xx_fimd_driver_data = { 101static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -82,12 +106,19 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
82 106
83static struct fimd_driver_data exynos4_fimd_driver_data = { 107static struct fimd_driver_data exynos4_fimd_driver_data = {
84 .timing_base = 0x0, 108 .timing_base = 0x0,
109 .lcdblk_offset = 0x210,
110 .lcdblk_vt_shift = 10,
111 .lcdblk_bypass_shift = 1,
85 .has_shadowcon = 1, 112 .has_shadowcon = 1,
86}; 113};
87 114
88static struct fimd_driver_data exynos5_fimd_driver_data = { 115static struct fimd_driver_data exynos5_fimd_driver_data = {
89 .timing_base = 0x20000, 116 .timing_base = 0x20000,
117 .lcdblk_offset = 0x214,
118 .lcdblk_vt_shift = 24,
119 .lcdblk_bypass_shift = 15,
90 .has_shadowcon = 1, 120 .has_shadowcon = 1,
121 .has_vidoutcon = 1,
91}; 122};
92 123
93struct fimd_win_data { 124struct fimd_win_data {
@@ -112,15 +143,22 @@ struct fimd_context {
112 struct clk *bus_clk; 143 struct clk *bus_clk;
113 struct clk *lcd_clk; 144 struct clk *lcd_clk;
114 void __iomem *regs; 145 void __iomem *regs;
146 struct regmap *sysreg;
115 struct drm_display_mode mode; 147 struct drm_display_mode mode;
116 struct fimd_win_data win_data[WINDOWS_NR]; 148 struct fimd_win_data win_data[WINDOWS_NR];
117 unsigned int default_win; 149 unsigned int default_win;
118 unsigned long irq_flags; 150 unsigned long irq_flags;
151 u32 vidcon0;
119 u32 vidcon1; 152 u32 vidcon1;
153 u32 vidout_con;
154 u32 i80ifcon;
155 bool i80_if;
120 bool suspended; 156 bool suspended;
121 int pipe; 157 int pipe;
122 wait_queue_head_t wait_vsync_queue; 158 wait_queue_head_t wait_vsync_queue;
123 atomic_t wait_vsync_event; 159 atomic_t wait_vsync_event;
160 atomic_t win_updated;
161 atomic_t triggering;
124 162
125 struct exynos_drm_panel_info panel; 163 struct exynos_drm_panel_info panel;
126 struct fimd_driver_data *driver_data; 164 struct fimd_driver_data *driver_data;
@@ -136,6 +174,7 @@ static const struct of_device_id fimd_driver_dt_match[] = {
136 .data = &exynos5_fimd_driver_data }, 174 .data = &exynos5_fimd_driver_data },
137 {}, 175 {},
138}; 176};
177MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
139 178
140static inline struct fimd_driver_data *drm_fimd_get_driver_data( 179static inline struct fimd_driver_data *drm_fimd_get_driver_data(
141 struct platform_device *pdev) 180 struct platform_device *pdev)
@@ -243,6 +282,14 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
243 unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh; 282 unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
244 u32 clkdiv; 283 u32 clkdiv;
245 284
285 if (ctx->i80_if) {
286 /*
287 * The frame done interrupt must occur before the
288 * next TE signal.
289 */
290 ideal_clk *= 2;
291 }
292
246 /* Find the clock divider value that gets us closest to ideal_clk */ 293 /* Find the clock divider value that gets us closest to ideal_clk */
247 clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk); 294 clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
248 295
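
Doubling ideal_clk halves the divider, so the pixel engine runs fast enough for the frame-done interrupt to land before the next TE pulse. A standalone version of the calculation (the timing values below are hypothetical; DIV_ROUND_UP matches the kernel macro):

#include <stdio.h>

/* same rounding as the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int calc_clkdiv(unsigned long lcd_clk_rate,
				unsigned long htotal, unsigned long vtotal,
				unsigned long vrefresh, int i80_if)
{
	unsigned long ideal_clk = htotal * vtotal * vrefresh;

	if (i80_if)
		ideal_clk *= 2;	/* frame done must precede the next TE */

	return DIV_ROUND_UP(lcd_clk_rate, ideal_clk);
}

int main(void)
{
	/* hypothetical mode: 1056x525 total at 60 Hz, 133 MHz source clock */
	printf("clkdiv (RGB): %u\n", calc_clkdiv(133000000UL, 1056, 525, 60, 0));
	printf("clkdiv (I80): %u\n", calc_clkdiv(133000000UL, 1056, 525, 60, 1));
	return 0;
}
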
@@ -271,11 +318,10 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
271{ 318{
272 struct fimd_context *ctx = mgr->ctx; 319 struct fimd_context *ctx = mgr->ctx;
273 struct drm_display_mode *mode = &ctx->mode; 320 struct drm_display_mode *mode = &ctx->mode;
274 struct fimd_driver_data *driver_data; 321 struct fimd_driver_data *driver_data = ctx->driver_data;
275 u32 val, clkdiv, vidcon1; 322 void *timing_base = ctx->regs + driver_data->timing_base;
276 int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd; 323 u32 val, clkdiv;
277 324
278 driver_data = ctx->driver_data;
279 if (ctx->suspended) 325 if (ctx->suspended)
280 return; 326 return;
281 327
@@ -283,33 +329,65 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
283 if (mode->htotal == 0 || mode->vtotal == 0) 329 if (mode->htotal == 0 || mode->vtotal == 0)
284 return; 330 return;
285 331
286 /* setup polarity values */ 332 if (ctx->i80_if) {
287 vidcon1 = ctx->vidcon1; 333 val = ctx->i80ifcon | I80IFEN_ENABLE;
288 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 334 writel(val, timing_base + I80IFCONFAx(0));
289 vidcon1 |= VIDCON1_INV_VSYNC; 335
290 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 336 /* disable auto frame rate */
291 vidcon1 |= VIDCON1_INV_HSYNC; 337 writel(0, timing_base + I80IFCONFBx(0));
292 writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); 338
293 339 /* set video type selection to I80 interface */
294 /* setup vertical timing values. */ 340 if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
295 vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; 341 driver_data->lcdblk_offset,
296 vbpd = mode->crtc_vtotal - mode->crtc_vsync_end; 342 0x3 << driver_data->lcdblk_vt_shift,
297 vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay; 343 0x1 << driver_data->lcdblk_vt_shift)) {
298 344 DRM_ERROR("Failed to update sysreg for I80 i/f.\n");
299 val = VIDTCON0_VBPD(vbpd - 1) | 345 return;
300 VIDTCON0_VFPD(vfpd - 1) | 346 }
301 VIDTCON0_VSPW(vsync_len - 1); 347 } else {
302 writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); 348 int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
303 349 u32 vidcon1;
304 /* setup horizontal timing values. */ 350
305 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; 351 /* setup polarity values */
306 hbpd = mode->crtc_htotal - mode->crtc_hsync_end; 352 vidcon1 = ctx->vidcon1;
307 hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay; 353 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
308 354 vidcon1 |= VIDCON1_INV_VSYNC;
309 val = VIDTCON1_HBPD(hbpd - 1) | 355 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
310 VIDTCON1_HFPD(hfpd - 1) | 356 vidcon1 |= VIDCON1_INV_HSYNC;
311 VIDTCON1_HSPW(hsync_len - 1); 357 writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
312 writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); 358
359 /* setup vertical timing values. */
360 vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
361 vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
362 vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
363
364 val = VIDTCON0_VBPD(vbpd - 1) |
365 VIDTCON0_VFPD(vfpd - 1) |
366 VIDTCON0_VSPW(vsync_len - 1);
367 writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
368
369 /* setup horizontal timing values. */
370 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
371 hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
372 hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
373
374 val = VIDTCON1_HBPD(hbpd - 1) |
375 VIDTCON1_HFPD(hfpd - 1) |
376 VIDTCON1_HSPW(hsync_len - 1);
377 writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
378 }
379
380 if (driver_data->has_vidoutcon)
381 writel(ctx->vidout_con, timing_base + VIDOUT_CON);
382
383 /* set bypass selection */
384 if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
385 driver_data->lcdblk_offset,
386 0x1 << driver_data->lcdblk_bypass_shift,
387 0x1 << driver_data->lcdblk_bypass_shift)) {
388 DRM_ERROR("Failed to update sysreg for bypass setting.\n");
389 return;
390 }
313 391
314 /* setup horizontal and vertical display size. */ 392 /* setup horizontal and vertical display size. */
315 val = VIDTCON2_LINEVAL(mode->vdisplay - 1) | 393 val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
@@ -322,7 +400,8 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
322 * fields of register with prefix '_F' would be updated 400 * fields of register with prefix '_F' would be updated
323 * at vsync(same as dma start) 401 * at vsync(same as dma start)
324 */ 402 */
325 val = VIDCON0_ENVID | VIDCON0_ENVID_F; 403 val = ctx->vidcon0;
404 val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
326 405
327 if (ctx->driver_data->has_clksel) 406 if (ctx->driver_data->has_clksel)
328 val |= VIDCON0_CLKSEL_LCD; 407 val |= VIDCON0_CLKSEL_LCD;
@@ -660,6 +739,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
660 } 739 }
661 740
662 win_data->enabled = true; 741 win_data->enabled = true;
742
743 if (ctx->i80_if)
744 atomic_set(&ctx->win_updated, 1);
663} 745}
664 746
665static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos) 747static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
@@ -838,6 +920,58 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
838 } 920 }
839} 921}
840 922
923static void fimd_trigger(struct device *dev)
924{
925 struct exynos_drm_manager *mgr = get_fimd_manager(dev);
926 struct fimd_context *ctx = mgr->ctx;
927 struct fimd_driver_data *driver_data = ctx->driver_data;
928 void *timing_base = ctx->regs + driver_data->timing_base;
929 u32 reg;
930
931 atomic_set(&ctx->triggering, 1);
932
933 reg = readl(ctx->regs + VIDINTCON0);
934 reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE |
935 VIDINTCON0_INT_SYSMAINCON);
936 writel(reg, ctx->regs + VIDINTCON0);
937
938 reg = readl(timing_base + TRIGCON);
939 reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
940 writel(reg, timing_base + TRIGCON);
941}
942
943static void fimd_te_handler(struct exynos_drm_manager *mgr)
944{
945 struct fimd_context *ctx = mgr->ctx;
946
947 /* Check whether the crtc has already been detached from the encoder */
948 if (ctx->pipe < 0 || !ctx->drm_dev)
949 return;
950
951 /*
952 * Skip the trigger while already in the triggering state; multiple
953 * trigger requests can cause a panel reset.
954 */
955 if (atomic_read(&ctx->triggering))
956 return;
957
958 /*
959 * If there is a page flip request, trigger and handle the page flip
960 * event so that the current fb is written into the panel GRAM.
961 */
962 if (atomic_add_unless(&ctx->win_updated, -1, 0))
963 fimd_trigger(ctx->dev);
964
965 /* Wake up the vsync event queue */
966 if (atomic_read(&ctx->wait_vsync_event)) {
967 atomic_set(&ctx->wait_vsync_event, 0);
968 wake_up(&ctx->wait_vsync_queue);
969
970 if (!atomic_read(&ctx->triggering))
971 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
972 }
973}
974
841static struct exynos_drm_manager_ops fimd_manager_ops = { 975static struct exynos_drm_manager_ops fimd_manager_ops = {
842 .dpms = fimd_dpms, 976 .dpms = fimd_dpms,
843 .mode_fixup = fimd_mode_fixup, 977 .mode_fixup = fimd_mode_fixup,
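
The win_updated and triggering flags above form a small handshake: a window commit arms win_updated, the next TE interrupt consumes it and fires the trigger, and the frame-done interrupt clears triggering so a later TE may trigger again. A simplified single-threaded model with C11 atomics (the driver uses atomic_add_unless(&ctx->win_updated, -1, 0); the compare-exchange below is equivalent for the 0/1 values used here):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int win_updated;
static atomic_int triggering;

static void trigger(void)
{
	atomic_store(&triggering, 1);
	puts("program TRIGCON, wait for frame-done IRQ");
}

static void te_handler(void)
{
	int one = 1;

	/* multiple triggers before frame done could reset the panel */
	if (atomic_load(&triggering))
		return;

	/* only trigger when a window commit actually updated the fb */
	if (atomic_compare_exchange_strong(&win_updated, &one, 0))
		trigger();
}

static void frame_done_irq(void)
{
	atomic_store(&triggering, 0);	/* leave triggering state */
}

int main(void)
{
	atomic_store(&win_updated, 1);	/* fimd_win_commit() in i80 mode */
	te_handler();			/* first TE: fires the trigger */
	te_handler();			/* second TE: skipped, still triggering */
	frame_done_irq();
	te_handler();			/* nothing queued, nothing happens */
	return 0;
}
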
@@ -849,6 +983,7 @@ static struct exynos_drm_manager_ops fimd_manager_ops = {
849 .win_mode_set = fimd_win_mode_set, 983 .win_mode_set = fimd_win_mode_set,
850 .win_commit = fimd_win_commit, 984 .win_commit = fimd_win_commit,
851 .win_disable = fimd_win_disable, 985 .win_disable = fimd_win_disable,
986 .te_handler = fimd_te_handler,
852}; 987};
853 988
854static struct exynos_drm_manager fimd_manager = { 989static struct exynos_drm_manager fimd_manager = {
@@ -859,26 +994,40 @@ static struct exynos_drm_manager fimd_manager = {
859static irqreturn_t fimd_irq_handler(int irq, void *dev_id) 994static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
860{ 995{
861 struct fimd_context *ctx = (struct fimd_context *)dev_id; 996 struct fimd_context *ctx = (struct fimd_context *)dev_id;
862 u32 val; 997 u32 val, clear_bit;
863 998
864 val = readl(ctx->regs + VIDINTCON1); 999 val = readl(ctx->regs + VIDINTCON1);
865 1000
866 if (val & VIDINTCON1_INT_FRAME) 1001 clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
867 /* VSYNC interrupt */ 1002 if (val & clear_bit)
868 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); 1003 writel(clear_bit, ctx->regs + VIDINTCON1);
869 1004
870 /* check whether the crtc has already been detached from the encoder */ 1005 /* check whether the crtc has already been detached from the encoder */
871 if (ctx->pipe < 0 || !ctx->drm_dev) 1006 if (ctx->pipe < 0 || !ctx->drm_dev)
872 goto out; 1007 goto out;
873 1008
874 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 1009 if (ctx->i80_if) {
875 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); 1010 /* unset I80 frame done interrupt */
1011 val = readl(ctx->regs + VIDINTCON0);
1012 val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON);
1013 writel(val, ctx->regs + VIDINTCON0);
876 1014
877 /* set wait vsync event to zero and wake up queue. */ 1015 /* exit triggering mode */
878 if (atomic_read(&ctx->wait_vsync_event)) { 1016 atomic_set(&ctx->triggering, 0);
879 atomic_set(&ctx->wait_vsync_event, 0); 1017
880 wake_up(&ctx->wait_vsync_queue); 1018 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
1019 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
1020 } else {
1021 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
1022 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
1023
1024 /* set wait vsync event to zero and wake up queue. */
1025 if (atomic_read(&ctx->wait_vsync_event)) {
1026 atomic_set(&ctx->wait_vsync_event, 0);
1027 wake_up(&ctx->wait_vsync_queue);
1028 }
881 } 1029 }
1030
882out: 1031out:
883 return IRQ_HANDLED; 1032 return IRQ_HANDLED;
884} 1033}
@@ -923,6 +1072,7 @@ static int fimd_probe(struct platform_device *pdev)
923{ 1072{
924 struct device *dev = &pdev->dev; 1073 struct device *dev = &pdev->dev;
925 struct fimd_context *ctx; 1074 struct fimd_context *ctx;
1075 struct device_node *i80_if_timings;
926 struct resource *res; 1076 struct resource *res;
927 int ret = -EINVAL; 1077 int ret = -EINVAL;
928 1078
@@ -944,12 +1094,51 @@ static int fimd_probe(struct platform_device *pdev)
944 1094
945 ctx->dev = dev; 1095 ctx->dev = dev;
946 ctx->suspended = true; 1096 ctx->suspended = true;
1097 ctx->driver_data = drm_fimd_get_driver_data(pdev);
947 1098
948 if (of_property_read_bool(dev->of_node, "samsung,invert-vden")) 1099 if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
949 ctx->vidcon1 |= VIDCON1_INV_VDEN; 1100 ctx->vidcon1 |= VIDCON1_INV_VDEN;
950 if (of_property_read_bool(dev->of_node, "samsung,invert-vclk")) 1101 if (of_property_read_bool(dev->of_node, "samsung,invert-vclk"))
951 ctx->vidcon1 |= VIDCON1_INV_VCLK; 1102 ctx->vidcon1 |= VIDCON1_INV_VCLK;
952 1103
1104 i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
1105 if (i80_if_timings) {
1106 u32 val;
1107
1108 ctx->i80_if = true;
1109
1110 if (ctx->driver_data->has_vidoutcon)
1111 ctx->vidout_con |= VIDOUT_CON_F_I80_LDI0;
1112 else
1113 ctx->vidcon0 |= VIDCON0_VIDOUT_I80_LDI0;
1114 /*
1115 * The user manual states that this "DSI_EN" bit is required
1116 * to enable the I80 24-bit data interface.
1117 */
1118 ctx->vidcon0 |= VIDCON0_DSI_EN;
1119
1120 if (of_property_read_u32(i80_if_timings, "cs-setup", &val))
1121 val = 0;
1122 ctx->i80ifcon = LCD_CS_SETUP(val);
1123 if (of_property_read_u32(i80_if_timings, "wr-setup", &val))
1124 val = 0;
1125 ctx->i80ifcon |= LCD_WR_SETUP(val);
1126 if (of_property_read_u32(i80_if_timings, "wr-active", &val))
1127 val = 1;
1128 ctx->i80ifcon |= LCD_WR_ACTIVE(val);
1129 if (of_property_read_u32(i80_if_timings, "wr-hold", &val))
1130 val = 0;
1131 ctx->i80ifcon |= LCD_WR_HOLD(val);
1132 }
1133 of_node_put(i80_if_timings);
1134
1135 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
1136 "samsung,sysreg");
1137 if (IS_ERR(ctx->sysreg)) {
1138 dev_warn(dev, "failed to get system register.\n");
1139 ctx->sysreg = NULL;
1140 }
1141
953 ctx->bus_clk = devm_clk_get(dev, "fimd"); 1142 ctx->bus_clk = devm_clk_get(dev, "fimd");
954 if (IS_ERR(ctx->bus_clk)) { 1143 if (IS_ERR(ctx->bus_clk)) {
955 dev_err(dev, "failed to get bus clock\n"); 1144 dev_err(dev, "failed to get bus clock\n");
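
The four i80-if-timings properties end up in a single I80IFCONFA value; a standalone sketch assembling it with the macros from the register block above (the values below are the hunk's DT defaults, used here as a made-up example):

#include <stdio.h>
#include <stdint.h>

/* field macros copied from the I80 register block above */
#define LCD_CS_SETUP(x)		((x) << 16)
#define LCD_WR_SETUP(x)		((x) << 12)
#define LCD_WR_ACTIVE(x)	((x) << 8)
#define LCD_WR_HOLD(x)		((x) << 4)
#define I80IFEN_ENABLE		(1 << 0)

int main(void)
{
	/* defaults from probe: cs-setup=0, wr-setup=0, wr-active=1, wr-hold=0 */
	uint32_t i80ifcon = LCD_CS_SETUP(0) | LCD_WR_SETUP(0) |
			    LCD_WR_ACTIVE(1) | LCD_WR_HOLD(0);

	/* fimd_commit() ORs in the enable bit before writing I80IFCONFAx(0) */
	printf("I80IFCONFA = 0x%08x\n", i80ifcon | I80IFEN_ENABLE);
	return 0;
}
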
@@ -972,7 +1161,8 @@ static int fimd_probe(struct platform_device *pdev)
972 goto err_del_component; 1161 goto err_del_component;
973 } 1162 }
974 1163
975 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync"); 1164 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1165 ctx->i80_if ? "lcd_sys" : "vsync");
976 if (!res) { 1166 if (!res) {
977 dev_err(dev, "irq request failed.\n"); 1167 dev_err(dev, "irq request failed.\n");
978 ret = -ENXIO; 1168 ret = -ENXIO;
@@ -986,7 +1176,6 @@ static int fimd_probe(struct platform_device *pdev)
986 goto err_del_component; 1176 goto err_del_component;
987 } 1177 }
988 1178
989 ctx->driver_data = drm_fimd_get_driver_data(pdev);
990 init_waitqueue_head(&ctx->wait_vsync_queue); 1179 init_waitqueue_head(&ctx->wait_vsync_queue);
991 atomic_set(&ctx->wait_vsync_event, 0); 1180 atomic_set(&ctx->wait_vsync_event, 0);
992 1181
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 800158714473..df7a77d3eff8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1042,8 +1042,23 @@ err:
1042int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, 1042int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
1043 struct drm_file *file) 1043 struct drm_file *file)
1044{ 1044{
1045 struct drm_exynos_file_private *file_priv = file->driver_priv;
1046 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
1047 struct device *dev;
1048 struct g2d_data *g2d;
1045 struct drm_exynos_g2d_get_ver *ver = data; 1049 struct drm_exynos_g2d_get_ver *ver = data;
1046 1050
1051 if (!g2d_priv)
1052 return -ENODEV;
1053
1054 dev = g2d_priv->dev;
1055 if (!dev)
1056 return -ENODEV;
1057
1058 g2d = dev_get_drvdata(dev);
1059 if (!g2d)
1060 return -EFAULT;
1061
1047 ver->major = G2D_HW_MAJOR_VER; 1062 ver->major = G2D_HW_MAJOR_VER;
1048 ver->minor = G2D_HW_MINOR_VER; 1063 ver->minor = G2D_HW_MINOR_VER;
1049 1064
@@ -1056,7 +1071,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1056{ 1071{
1057 struct drm_exynos_file_private *file_priv = file->driver_priv; 1072 struct drm_exynos_file_private *file_priv = file->driver_priv;
1058 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 1073 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
1059 struct device *dev = g2d_priv->dev; 1074 struct device *dev;
1060 struct g2d_data *g2d; 1075 struct g2d_data *g2d;
1061 struct drm_exynos_g2d_set_cmdlist *req = data; 1076 struct drm_exynos_g2d_set_cmdlist *req = data;
1062 struct drm_exynos_g2d_cmd *cmd; 1077 struct drm_exynos_g2d_cmd *cmd;
@@ -1067,6 +1082,10 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1067 int size; 1082 int size;
1068 int ret; 1083 int ret;
1069 1084
1085 if (!g2d_priv)
1086 return -ENODEV;
1087
1088 dev = g2d_priv->dev;
1070 if (!dev) 1089 if (!dev)
1071 return -ENODEV; 1090 return -ENODEV;
1072 1091
@@ -1223,13 +1242,17 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
1223{ 1242{
1224 struct drm_exynos_file_private *file_priv = file->driver_priv; 1243 struct drm_exynos_file_private *file_priv = file->driver_priv;
1225 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 1244 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
1226 struct device *dev = g2d_priv->dev; 1245 struct device *dev;
1227 struct g2d_data *g2d; 1246 struct g2d_data *g2d;
1228 struct drm_exynos_g2d_exec *req = data; 1247 struct drm_exynos_g2d_exec *req = data;
1229 struct g2d_runqueue_node *runqueue_node; 1248 struct g2d_runqueue_node *runqueue_node;
1230 struct list_head *run_cmdlist; 1249 struct list_head *run_cmdlist;
1231 struct list_head *event_list; 1250 struct list_head *event_list;
1232 1251
1252 if (!g2d_priv)
1253 return -ENODEV;
1254
1255 dev = g2d_priv->dev;
1233 if (!dev) 1256 if (!dev)
1234 return -ENODEV; 1257 return -ENODEV;
1235 1258
@@ -1544,8 +1567,10 @@ static const struct dev_pm_ops g2d_pm_ops = {
1544 1567
1545static const struct of_device_id exynos_g2d_match[] = { 1568static const struct of_device_id exynos_g2d_match[] = {
1546 { .compatible = "samsung,exynos5250-g2d" }, 1569 { .compatible = "samsung,exynos5250-g2d" },
1570 { .compatible = "samsung,exynos4212-g2d" },
1547 {}, 1571 {},
1548}; 1572};
1573MODULE_DEVICE_TABLE(of, exynos_g2d_match);
1549 1574
1550struct platform_driver g2d_driver = { 1575struct platform_driver g2d_driver = {
1551 .probe = g2d_probe, 1576 .probe = g2d_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 163a054922cb..15db80138382 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -301,7 +301,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
301 unsigned int gem_handle, 301 unsigned int gem_handle,
302 struct drm_file *filp) 302 struct drm_file *filp)
303{ 303{
304 struct exynos_drm_gem_obj *exynos_gem_obj;
305 struct drm_gem_object *obj; 304 struct drm_gem_object *obj;
306 305
307 obj = drm_gem_object_lookup(dev, filp, gem_handle); 306 obj = drm_gem_object_lookup(dev, filp, gem_handle);
@@ -310,8 +309,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
310 return; 309 return;
311 } 310 }
312 311
313 exynos_gem_obj = to_exynos_gem_obj(obj);
314
315 drm_gem_object_unreference_unlocked(obj); 312 drm_gem_object_unreference_unlocked(obj);
316 313
317 /* 314 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index a1888e128f1d..c411399070d6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -129,9 +129,6 @@ void exynos_platform_device_ipp_unregister(void)
129 129
130int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) 130int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
131{ 131{
132 if (!ippdrv)
133 return -EINVAL;
134
135 mutex_lock(&exynos_drm_ippdrv_lock); 132 mutex_lock(&exynos_drm_ippdrv_lock);
136 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list); 133 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
137 mutex_unlock(&exynos_drm_ippdrv_lock); 134 mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -141,9 +138,6 @@ int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
141 138
142int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv) 139int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
143{ 140{
144 if (!ippdrv)
145 return -EINVAL;
146
147 mutex_lock(&exynos_drm_ippdrv_lock); 141 mutex_lock(&exynos_drm_ippdrv_lock);
148 list_del(&ippdrv->drv_list); 142 list_del(&ippdrv->drv_list);
149 mutex_unlock(&exynos_drm_ippdrv_lock); 143 mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -151,20 +145,15 @@ int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
151 return 0; 145 return 0;
152} 146}
153 147
154static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj, 148static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
155 u32 *idp)
156{ 149{
157 int ret; 150 int ret;
158 151
159 /* do the allocation under our mutexlock */
160 mutex_lock(lock); 152 mutex_lock(lock);
161 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL); 153 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
162 mutex_unlock(lock); 154 mutex_unlock(lock);
163 if (ret < 0)
164 return ret;
165 155
166 *idp = ret; 156 return ret;
167 return 0;
168} 157}
169 158
170static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id) 159static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
@@ -178,35 +167,25 @@ static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
178{ 167{
179 void *obj; 168 void *obj;
180 169
181 DRM_DEBUG_KMS("id[%d]\n", id);
182
183 mutex_lock(lock); 170 mutex_lock(lock);
184
185 /* find object using handle */
186 obj = idr_find(id_idr, id); 171 obj = idr_find(id_idr, id);
187 if (!obj) {
188 DRM_ERROR("failed to find object.\n");
189 mutex_unlock(lock);
190 return ERR_PTR(-ENODEV);
191 }
192
193 mutex_unlock(lock); 172 mutex_unlock(lock);
194 173
195 return obj; 174 return obj;
196} 175}
197 176
198static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv, 177static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
199 enum drm_exynos_ipp_cmd cmd) 178 struct drm_exynos_ipp_property *property)
200{ 179{
201 /* 180 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
202 * check dedicated flag and WB, OUTPUT operation with 181 !pm_runtime_suspended(ippdrv->dev)))
203 * power on state. 182 return -EBUSY;
204 */
205 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
206 !pm_runtime_suspended(ippdrv->dev)))
207 return true;
208 183
209 return false; 184 if (ippdrv->check_property &&
185 ippdrv->check_property(ippdrv->dev, property))
186 return -EINVAL;
187
188 return 0;
210} 189}
211 190
212static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx, 191static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
@@ -214,62 +193,30 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
214{ 193{
215 struct exynos_drm_ippdrv *ippdrv; 194 struct exynos_drm_ippdrv *ippdrv;
216 u32 ipp_id = property->ipp_id; 195 u32 ipp_id = property->ipp_id;
217 196 int ret;
218 DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);
219 197
220 if (ipp_id) { 198 if (ipp_id) {
221 /* find ipp driver using idr */ 199 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
222 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, 200 if (!ippdrv) {
223 ipp_id); 201 DRM_DEBUG("ipp%d driver not found\n", ipp_id);
224 if (IS_ERR(ippdrv)) { 202 return ERR_PTR(-ENODEV);
225 DRM_ERROR("not found ipp%d driver.\n", ipp_id);
226 return ippdrv;
227 } 203 }
228 204
229 /* 205 ret = ipp_check_driver(ippdrv, property);
230 * WB, OUTPUT opertion not supported multi-operation. 206 if (ret < 0) {
231 * so, make dedicated state at set property ioctl. 207 DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
232 * when ipp driver finished operations, clear dedicated flags. 208 return ERR_PTR(ret);
233 */
234 if (ipp_check_dedicated(ippdrv, property->cmd)) {
235 DRM_ERROR("already used choose device.\n");
236 return ERR_PTR(-EBUSY);
237 }
238
239 /*
240 * This is necessary to find correct device in ipp drivers.
241 * ipp drivers have different abilities,
242 * so need to check property.
243 */
244 if (ippdrv->check_property &&
245 ippdrv->check_property(ippdrv->dev, property)) {
246 DRM_ERROR("not support property.\n");
247 return ERR_PTR(-EINVAL);
248 } 209 }
249 210
250 return ippdrv; 211 return ippdrv;
251 } else { 212 } else {
252 /*
253 * This case is search all ipp driver for finding.
254 * user application don't set ipp_id in this case,
255 * so ipp subsystem search correct driver in driver list.
256 */
257 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 213 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
258 if (ipp_check_dedicated(ippdrv, property->cmd)) { 214 ret = ipp_check_driver(ippdrv, property);
259 DRM_DEBUG_KMS("used device.\n"); 215 if (ret == 0)
260 continue; 216 return ippdrv;
261 }
262
263 if (ippdrv->check_property &&
264 ippdrv->check_property(ippdrv->dev, property)) {
265 DRM_DEBUG_KMS("not support property.\n");
266 continue;
267 }
268
269 return ippdrv;
270 } 217 }
271 218
272 DRM_ERROR("not support ipp driver operations.\n"); 219 DRM_DEBUG("cannot find driver suitable for given property.\n");
273 } 220 }
274 221
275 return ERR_PTR(-ENODEV); 222 return ERR_PTR(-ENODEV);
@@ -308,8 +255,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
308 struct drm_file *file) 255 struct drm_file *file)
309{ 256{
310 struct drm_exynos_file_private *file_priv = file->driver_priv; 257 struct drm_exynos_file_private *file_priv = file->driver_priv;
311 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; 258 struct device *dev = file_priv->ipp_dev;
312 struct device *dev = priv->dev;
313 struct ipp_context *ctx = get_ipp_context(dev); 259 struct ipp_context *ctx = get_ipp_context(dev);
314 struct drm_exynos_ipp_prop_list *prop_list = data; 260 struct drm_exynos_ipp_prop_list *prop_list = data;
315 struct exynos_drm_ippdrv *ippdrv; 261 struct exynos_drm_ippdrv *ippdrv;
@@ -346,10 +292,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
346 */ 292 */
347 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, 293 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
348 prop_list->ipp_id); 294 prop_list->ipp_id);
349 if (IS_ERR(ippdrv)) { 295 if (!ippdrv) {
350 DRM_ERROR("not found ipp%d driver.\n", 296 DRM_ERROR("not found ipp%d driver.\n",
351 prop_list->ipp_id); 297 prop_list->ipp_id);
352 return PTR_ERR(ippdrv); 298 return -ENODEV;
353 } 299 }
354 300
355 *prop_list = ippdrv->prop_list; 301 *prop_list = ippdrv->prop_list;
@@ -432,7 +378,7 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
432 if (!event_work) 378 if (!event_work)
433 return ERR_PTR(-ENOMEM); 379 return ERR_PTR(-ENOMEM);
434 380
435 INIT_WORK((struct work_struct *)event_work, ipp_sched_event); 381 INIT_WORK(&event_work->work, ipp_sched_event);
436 382
437 return event_work; 383 return event_work;
438} 384}
@@ -441,8 +387,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
441 struct drm_file *file) 387 struct drm_file *file)
442{ 388{
443 struct drm_exynos_file_private *file_priv = file->driver_priv; 389 struct drm_exynos_file_private *file_priv = file->driver_priv;
444 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; 390 struct device *dev = file_priv->ipp_dev;
445 struct device *dev = priv->dev;
446 struct ipp_context *ctx = get_ipp_context(dev); 391 struct ipp_context *ctx = get_ipp_context(dev);
447 struct drm_exynos_ipp_property *property = data; 392 struct drm_exynos_ipp_property *property = data;
448 struct exynos_drm_ippdrv *ippdrv; 393 struct exynos_drm_ippdrv *ippdrv;
@@ -489,19 +434,18 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
489 if (!c_node) 434 if (!c_node)
490 return -ENOMEM; 435 return -ENOMEM;
491 436
492 /* create property id */ 437 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
493 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, 438 if (ret < 0) {
494 &property->prop_id);
495 if (ret) {
496 DRM_ERROR("failed to create id.\n"); 439 DRM_ERROR("failed to create id.\n");
497 goto err_clear; 440 goto err_clear;
498 } 441 }
442 property->prop_id = ret;
499 443
500 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", 444 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
501 property->prop_id, property->cmd, (int)ippdrv); 445 property->prop_id, property->cmd, (int)ippdrv);
502 446
503 /* stored property information and ippdrv in private data */ 447 /* stored property information and ippdrv in private data */
504 c_node->priv = priv; 448 c_node->dev = dev;
505 c_node->property = *property; 449 c_node->property = *property;
506 c_node->state = IPP_STATE_IDLE; 450 c_node->state = IPP_STATE_IDLE;
507 451
@@ -534,7 +478,6 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
534 INIT_LIST_HEAD(&c_node->mem_list[i]); 478 INIT_LIST_HEAD(&c_node->mem_list[i]);
535 479
536 INIT_LIST_HEAD(&c_node->event_list); 480 INIT_LIST_HEAD(&c_node->event_list);
537 list_splice_init(&priv->event_list, &c_node->event_list);
538 mutex_lock(&ippdrv->cmd_lock); 481 mutex_lock(&ippdrv->cmd_lock);
539 list_add_tail(&c_node->list, &ippdrv->cmd_list); 482 list_add_tail(&c_node->list, &ippdrv->cmd_list);
540 mutex_unlock(&ippdrv->cmd_lock); 483 mutex_unlock(&ippdrv->cmd_lock);
@@ -577,42 +520,18 @@ static void ipp_clean_cmd_node(struct ipp_context *ctx,
577 kfree(c_node); 520 kfree(c_node);
578} 521}
579 522
580static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node) 523static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
581{ 524{
582 struct drm_exynos_ipp_property *property = &c_node->property; 525 switch (c_node->property.cmd) {
583 struct drm_exynos_ipp_mem_node *m_node; 526 case IPP_CMD_WB:
584 struct list_head *head; 527 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
585 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, }; 528 case IPP_CMD_OUTPUT:
586 529 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
587 for_each_ipp_ops(i) { 530 case IPP_CMD_M2M:
588 /* source/destination memory list */ 531 default:
589 head = &c_node->mem_list[i]; 532 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
590 533 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
591 /* find memory node entry */
592 list_for_each_entry(m_node, head, list) {
593 DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
594 i ? "dst" : "src", count[i], (int)m_node);
595 count[i]++;
596 }
597 } 534 }
598
599 DRM_DEBUG_KMS("min[%d]max[%d]\n",
600 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
601 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
602
603 /*
604 * M2M operations should be need paired memory address.
605 * so, need to check minimum count about src, dst.
606 * other case not use paired memory, so use maximum count
607 */
608 if (ipp_is_m2m_cmd(property->cmd))
609 ret = min(count[EXYNOS_DRM_OPS_SRC],
610 count[EXYNOS_DRM_OPS_DST]);
611 else
612 ret = max(count[EXYNOS_DRM_OPS_SRC],
613 count[EXYNOS_DRM_OPS_DST]);
614
615 return ret;
616} 535}
617 536
618static struct drm_exynos_ipp_mem_node 537static struct drm_exynos_ipp_mem_node
@@ -683,16 +602,14 @@ static struct drm_exynos_ipp_mem_node
683 struct drm_exynos_ipp_queue_buf *qbuf) 602 struct drm_exynos_ipp_queue_buf *qbuf)
684{ 603{
685 struct drm_exynos_ipp_mem_node *m_node; 604 struct drm_exynos_ipp_mem_node *m_node;
686 struct drm_exynos_ipp_buf_info buf_info; 605 struct drm_exynos_ipp_buf_info *buf_info;
687 void *addr;
688 int i; 606 int i;
689 607
690 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); 608 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
691 if (!m_node) 609 if (!m_node)
692 return ERR_PTR(-ENOMEM); 610 return ERR_PTR(-ENOMEM);
693 611
694 /* clear base address for error handling */ 612 buf_info = &m_node->buf_info;
695 memset(&buf_info, 0x0, sizeof(buf_info));
696 613
697 /* operations, buffer id */ 614 /* operations, buffer id */
698 m_node->ops_id = qbuf->ops_id; 615 m_node->ops_id = qbuf->ops_id;
@@ -707,6 +624,8 @@ static struct drm_exynos_ipp_mem_node
707 624
708 /* get dma address by handle */ 625 /* get dma address by handle */
709 if (qbuf->handle[i]) { 626 if (qbuf->handle[i]) {
627 dma_addr_t *addr;
628
710 addr = exynos_drm_gem_get_dma_addr(drm_dev, 629 addr = exynos_drm_gem_get_dma_addr(drm_dev,
711 qbuf->handle[i], file); 630 qbuf->handle[i], file);
712 if (IS_ERR(addr)) { 631 if (IS_ERR(addr)) {
@@ -714,15 +633,14 @@ static struct drm_exynos_ipp_mem_node
714 goto err_clear; 633 goto err_clear;
715 } 634 }
716 635
717 buf_info.handles[i] = qbuf->handle[i]; 636 buf_info->handles[i] = qbuf->handle[i];
718 buf_info.base[i] = *(dma_addr_t *) addr; 637 buf_info->base[i] = *addr;
719 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n", 638 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
720 i, buf_info.base[i], (int)buf_info.handles[i]); 639 buf_info->base[i], buf_info->handles[i]);
721 } 640 }
722 } 641 }
723 642
724 m_node->filp = file; 643 m_node->filp = file;
725 m_node->buf_info = buf_info;
726 mutex_lock(&c_node->mem_lock); 644 mutex_lock(&c_node->mem_lock);
727 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); 645 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
728 mutex_unlock(&c_node->mem_lock); 646 mutex_unlock(&c_node->mem_lock);
@@ -930,8 +848,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
930 struct drm_file *file) 848 struct drm_file *file)
931{ 849{
932 struct drm_exynos_file_private *file_priv = file->driver_priv; 850 struct drm_exynos_file_private *file_priv = file->driver_priv;
933 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; 851 struct device *dev = file_priv->ipp_dev;
934 struct device *dev = priv->dev;
935 struct ipp_context *ctx = get_ipp_context(dev); 852 struct ipp_context *ctx = get_ipp_context(dev);
936 struct drm_exynos_ipp_queue_buf *qbuf = data; 853 struct drm_exynos_ipp_queue_buf *qbuf = data;
937 struct drm_exynos_ipp_cmd_node *c_node; 854 struct drm_exynos_ipp_cmd_node *c_node;
@@ -955,9 +872,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
955 /* find command node */ 872 /* find command node */
956 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 873 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
957 qbuf->prop_id); 874 qbuf->prop_id);
958 if (IS_ERR(c_node)) { 875 if (!c_node) {
959 DRM_ERROR("failed to get command node.\n"); 876 DRM_ERROR("failed to get command node.\n");
960 return PTR_ERR(c_node); 877 return -ENODEV;
961 } 878 }
962 879
963 /* buffer control */ 880 /* buffer control */
@@ -1062,9 +979,8 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1062 struct drm_file *file) 979 struct drm_file *file)
1063{ 980{
1064 struct drm_exynos_file_private *file_priv = file->driver_priv; 981 struct drm_exynos_file_private *file_priv = file->driver_priv;
1065 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1066 struct exynos_drm_ippdrv *ippdrv = NULL; 982 struct exynos_drm_ippdrv *ippdrv = NULL;
1067 struct device *dev = priv->dev; 983 struct device *dev = file_priv->ipp_dev;
1068 struct ipp_context *ctx = get_ipp_context(dev); 984 struct ipp_context *ctx = get_ipp_context(dev);
1069 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data; 985 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1070 struct drm_exynos_ipp_cmd_work *cmd_work; 986 struct drm_exynos_ipp_cmd_work *cmd_work;
@@ -1091,9 +1007,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1091 1007
1092 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 1008 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1093 cmd_ctrl->prop_id); 1009 cmd_ctrl->prop_id);
1094 if (IS_ERR(c_node)) { 1010 if (!c_node) {
1095 DRM_ERROR("invalid command node list.\n"); 1011 DRM_ERROR("invalid command node list.\n");
1096 return PTR_ERR(c_node); 1012 return -ENODEV;
1097 } 1013 }
1098 1014
1099 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, 1015 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
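Both ioctl entry points now expect ipp_find_obj() to return NULL for a missing id, i.e. plain idr_find() semantics rather than an ERR_PTR, so each caller picks its own errno. A small compilable sketch, with find_node() as a hypothetical stand-in:

#include <errno.h>
#include <stddef.h>

struct cmd_node;

/* Hypothetical stand-in for ipp_find_obj(): NULL means "no such id",
 * never an ERR_PTR-encoded error. */
static struct cmd_node *find_node(int id)
{
        (void)id;
        return NULL;
}

static int use_node(int id)
{
        struct cmd_node *n = find_node(id);

        if (!n)
                return -ENODEV; /* was IS_ERR()/PTR_ERR() */
        return 0;
}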
@@ -1198,7 +1114,6 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1198 /* reset h/w block */ 1114 /* reset h/w block */
1199 if (ippdrv->reset && 1115 if (ippdrv->reset &&
1200 ippdrv->reset(ippdrv->dev)) { 1116 ippdrv->reset(ippdrv->dev)) {
1201 DRM_ERROR("failed to reset.\n");
1202 return -EINVAL; 1117 return -EINVAL;
1203 } 1118 }
1204 1119
@@ -1216,30 +1131,24 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1216 /* set format */ 1131 /* set format */
1217 if (ops->set_fmt) { 1132 if (ops->set_fmt) {
1218 ret = ops->set_fmt(ippdrv->dev, config->fmt); 1133 ret = ops->set_fmt(ippdrv->dev, config->fmt);
1219 if (ret) { 1134 if (ret)
1220 DRM_ERROR("not support format.\n");
1221 return ret; 1135 return ret;
1222 }
1223 } 1136 }
1224 1137
1225 /* set transform for rotation, flip */ 1138 /* set transform for rotation, flip */
1226 if (ops->set_transf) { 1139 if (ops->set_transf) {
1227 ret = ops->set_transf(ippdrv->dev, config->degree, 1140 ret = ops->set_transf(ippdrv->dev, config->degree,
1228 config->flip, &swap); 1141 config->flip, &swap);
1229 if (ret) { 1142 if (ret)
1230 DRM_ERROR("not support tranf.\n"); 1143 return ret;
1231 return -EINVAL;
1232 }
1233 } 1144 }
1234 1145
1235 /* set size */ 1146 /* set size */
1236 if (ops->set_size) { 1147 if (ops->set_size) {
1237 ret = ops->set_size(ippdrv->dev, swap, &config->pos, 1148 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1238 &config->sz); 1149 &config->sz);
1239 if (ret) { 1150 if (ret)
1240 DRM_ERROR("not support size.\n");
1241 return ret; 1151 return ret;
1242 }
1243 } 1152 }
1244 } 1153 }
1245 1154
@@ -1283,11 +1192,6 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1283 1192
1284 m_node = list_first_entry(head, 1193 m_node = list_first_entry(head,
1285 struct drm_exynos_ipp_mem_node, list); 1194 struct drm_exynos_ipp_mem_node, list);
1286 if (!m_node) {
1287 DRM_ERROR("failed to get node.\n");
1288 ret = -EFAULT;
1289 goto err_unlock;
1290 }
1291 1195
1292 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); 1196 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
1293 1197
@@ -1545,11 +1449,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1545 1449
1546 m_node = list_first_entry(head, 1450 m_node = list_first_entry(head,
1547 struct drm_exynos_ipp_mem_node, list); 1451 struct drm_exynos_ipp_mem_node, list);
1548 if (!m_node) {
1549 DRM_ERROR("empty memory node.\n");
1550 ret = -ENOMEM;
1551 goto err_mem_unlock;
1552 }
1553 1452
1554 tbuf_id[i] = m_node->buf_id; 1453 tbuf_id[i] = m_node->buf_id;
1555 DRM_DEBUG_KMS("%s buf_id[%d]\n", 1454 DRM_DEBUG_KMS("%s buf_id[%d]\n",
@@ -1586,11 +1485,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1586 1485
1587 m_node = list_first_entry(head, 1486 m_node = list_first_entry(head,
1588 struct drm_exynos_ipp_mem_node, list); 1487 struct drm_exynos_ipp_mem_node, list);
1589 if (!m_node) {
1590 DRM_ERROR("empty memory node.\n");
1591 ret = -ENOMEM;
1592 goto err_mem_unlock;
1593 }
1594 1488
1595 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; 1489 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1596 1490
@@ -1704,21 +1598,17 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1704 1598
1705 /* get ipp driver entry */ 1599 /* get ipp driver entry */
1706 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1600 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1707 u32 ipp_id;
1708
1709 ippdrv->drm_dev = drm_dev; 1601 ippdrv->drm_dev = drm_dev;
1710 1602
1711 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv, 1603 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
1712 &ipp_id); 1604 if (ret < 0) {
1713 if (ret || ipp_id == 0) {
1714 DRM_ERROR("failed to create id.\n"); 1605 DRM_ERROR("failed to create id.\n");
1715 goto err; 1606 goto err;
1716 } 1607 }
1608 ippdrv->prop_list.ipp_id = ret;
1717 1609
1718 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", 1610 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
1719 count++, (int)ippdrv, ipp_id); 1611 count++, (int)ippdrv, ret);
1720
1721 ippdrv->prop_list.ipp_id = ipp_id;
1722 1612
1723 /* store parent device for node */ 1613 /* store parent device for node */
1724 ippdrv->parent_dev = dev; 1614 ippdrv->parent_dev = dev;
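ipp_create_id() now follows the idr_alloc() convention: the new id (>= 1 here) comes back as the return value and a negative errno signals failure, which is why the &ipp_id out-parameter and the separate ipp_id == 0 check both disappear. A kernel-style sketch of such a helper; demo_create_id() is an illustrative name:

#include <linux/idr.h>
#include <linux/mutex.h>

/* Returns the allocated id on success, a negative errno on failure. */
static int demo_create_id(struct idr *idr, struct mutex *lock, void *obj)
{
        int id;

        mutex_lock(lock);
        id = idr_alloc(idr, obj, 1, 0, GFP_KERNEL); /* ids start at 1 */
        mutex_unlock(lock);

        return id;
}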
@@ -1776,17 +1666,10 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1776 struct drm_file *file) 1666 struct drm_file *file)
1777{ 1667{
1778 struct drm_exynos_file_private *file_priv = file->driver_priv; 1668 struct drm_exynos_file_private *file_priv = file->driver_priv;
1779 struct exynos_drm_ipp_private *priv;
1780
1781 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1782 if (!priv)
1783 return -ENOMEM;
1784 priv->dev = dev;
1785 file_priv->ipp_priv = priv;
1786 1669
1787 INIT_LIST_HEAD(&priv->event_list); 1670 file_priv->ipp_dev = dev;
1788 1671
1789 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv); 1672 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
1790 1673
1791 return 0; 1674 return 0;
1792} 1675}
@@ -1795,13 +1678,12 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1795 struct drm_file *file) 1678 struct drm_file *file)
1796{ 1679{
1797 struct drm_exynos_file_private *file_priv = file->driver_priv; 1680 struct drm_exynos_file_private *file_priv = file->driver_priv;
1798 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1799 struct exynos_drm_ippdrv *ippdrv = NULL; 1681 struct exynos_drm_ippdrv *ippdrv = NULL;
1800 struct ipp_context *ctx = get_ipp_context(dev); 1682 struct ipp_context *ctx = get_ipp_context(dev);
1801 struct drm_exynos_ipp_cmd_node *c_node, *tc_node; 1683 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1802 int count = 0; 1684 int count = 0;
1803 1685
1804 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv); 1686 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
1805 1687
1806 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1688 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1807 mutex_lock(&ippdrv->cmd_lock); 1689 mutex_lock(&ippdrv->cmd_lock);
@@ -1810,7 +1692,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1810 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", 1692 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
1811 count++, (int)ippdrv); 1693 count++, (int)ippdrv);
1812 1694
1813 if (c_node->priv == priv) { 1695 if (c_node->dev == file_priv->ipp_dev) {
1814 /* 1696 /*
 1815 * userspace went into an abnormal state (e.g. the process was 1697
 1816 * killed) and closed the file. 1698
@@ -1832,7 +1714,6 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1832 mutex_unlock(&ippdrv->cmd_lock); 1714 mutex_unlock(&ippdrv->cmd_lock);
1833 } 1715 }
1834 1716
1835 kfree(priv);
1836 return; 1717 return;
1837} 1718}
1838 1719
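With exynos_drm_ipp_private gone, open() just records the IPP device pointer in file_priv and close() matches leftover command nodes against that same pointer, so there is no per-file allocation left to create or free. A reduced sketch of the comparison, with illustrative demo_* types:

struct device;

struct demo_file_priv { struct device *ipp_dev; };
struct demo_cmd_node { struct device *dev; }; /* tagged at creation */

static int node_matches(const struct demo_cmd_node *n,
                        const struct demo_file_priv *fp)
{
        /* The intermediate private struct is gone; matching is a plain
         * pointer comparison now. */
        return n->dev == fp->ipp_dev;
}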
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 7aaeaae757c2..6f48d62aeb30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,7 @@ struct drm_exynos_ipp_cmd_work {
48/* 48/*
49 * A structure of command node. 49 * A structure of command node.
50 * 50 *
51 * @priv: IPP private information. 51 * @dev: IPP device.
52 * @list: list head to command queue information. 52 * @list: list head to command queue information.
53 * @event_list: list head of event. 53 * @event_list: list head of event.
54 * @mem_list: list head to source,destination memory queue information. 54 * @mem_list: list head to source,destination memory queue information.
@@ -64,7 +64,7 @@ struct drm_exynos_ipp_cmd_work {
64 * @state: state of command node. 64 * @state: state of command node.
65 */ 65 */
66struct drm_exynos_ipp_cmd_node { 66struct drm_exynos_ipp_cmd_node {
67 struct exynos_drm_ipp_private *priv; 67 struct device *dev;
68 struct list_head list; 68 struct list_head list;
69 struct list_head event_list; 69 struct list_head event_list;
70 struct list_head mem_list[EXYNOS_DRM_OPS_MAX]; 70 struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index f01fbb6dc1f0..55af6b41c1df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -691,6 +691,7 @@ static const struct of_device_id exynos_rotator_match[] = {
691 }, 691 },
692 {}, 692 {},
693}; 693};
694MODULE_DEVICE_TABLE(of, exynos_rotator_match);
694 695
695static int rotator_probe(struct platform_device *pdev) 696static int rotator_probe(struct platform_device *pdev)
696{ 697{
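MODULE_DEVICE_TABLE(of, ...) exports the match table as modalias data, which is what lets udev autoload the rotator module when a matching device-tree node is probed; without it the driver only binds if the module is already loaded. The general kernel pattern, with a hypothetical compatible string:

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id demo_match[] = {
        { .compatible = "vendor,demo-block" }, /* hypothetical */
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, demo_match);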
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 2fb8705d6461..9528d81d8004 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -562,7 +562,7 @@ static int vidi_create_connector(struct exynos_drm_display *display,
562 } 562 }
563 563
564 drm_connector_helper_add(connector, &vidi_connector_helper_funcs); 564 drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
565 drm_sysfs_connector_add(connector); 565 drm_connector_register(connector);
566 drm_mode_connector_attach_encoder(connector, encoder); 566 drm_mode_connector_attach_encoder(connector, encoder);
567 567
568 return 0; 568 return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index aa259b0a873a..562966db2aa1 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -84,6 +84,7 @@ struct hdmi_resources {
84 struct clk *sclk_hdmiphy; 84 struct clk *sclk_hdmiphy;
85 struct clk *mout_hdmi; 85 struct clk *mout_hdmi;
86 struct regulator_bulk_data *regul_bulk; 86 struct regulator_bulk_data *regul_bulk;
87 struct regulator *reg_hdmi_en;
87 int regul_count; 88 int regul_count;
88}; 89};
89 90
@@ -592,6 +593,13 @@ static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
592 .is_apb_phy = 0, 593 .is_apb_phy = 0,
593}; 594};
594 595
596static struct hdmi_driver_data exynos4210_hdmi_driver_data = {
597 .type = HDMI_TYPE13,
598 .phy_confs = hdmiphy_v13_configs,
599 .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs),
600 .is_apb_phy = 0,
601};
602
595static struct hdmi_driver_data exynos5_hdmi_driver_data = { 603static struct hdmi_driver_data exynos5_hdmi_driver_data = {
596 .type = HDMI_TYPE14, 604 .type = HDMI_TYPE14,
597 .phy_confs = hdmiphy_v13_configs, 605 .phy_confs = hdmiphy_v13_configs,
@@ -1129,7 +1137,7 @@ static int hdmi_create_connector(struct exynos_drm_display *display,
1129 } 1137 }
1130 1138
1131 drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); 1139 drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
1132 drm_sysfs_connector_add(connector); 1140 drm_connector_register(connector);
1133 drm_mode_connector_attach_encoder(connector, encoder); 1141 drm_mode_connector_attach_encoder(connector, encoder);
1134 1142
1135 return 0; 1143 return 0;
@@ -1241,14 +1249,13 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
1241 1249
1242static void hdmi_audio_init(struct hdmi_context *hdata) 1250static void hdmi_audio_init(struct hdmi_context *hdata)
1243{ 1251{
1244 u32 sample_rate, bits_per_sample, frame_size_code; 1252 u32 sample_rate, bits_per_sample;
1245 u32 data_num, bit_ch, sample_frq; 1253 u32 data_num, bit_ch, sample_frq;
1246 u32 val; 1254 u32 val;
1247 u8 acr[7]; 1255 u8 acr[7];
1248 1256
1249 sample_rate = 44100; 1257 sample_rate = 44100;
1250 bits_per_sample = 16; 1258 bits_per_sample = 16;
1251 frame_size_code = 0;
1252 1259
1253 switch (bits_per_sample) { 1260 switch (bits_per_sample) {
1254 case 20: 1261 case 20:
@@ -2168,7 +2175,6 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
2168 struct device *dev = hdata->dev; 2175 struct device *dev = hdata->dev;
2169 struct hdmi_resources *res = &hdata->res; 2176 struct hdmi_resources *res = &hdata->res;
2170 static char *supply[] = { 2177 static char *supply[] = {
2171 "hdmi-en",
2172 "vdd", 2178 "vdd",
2173 "vdd_osc", 2179 "vdd_osc",
2174 "vdd_pll", 2180 "vdd_pll",
@@ -2228,6 +2234,20 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
2228 } 2234 }
2229 res->regul_count = ARRAY_SIZE(supply); 2235 res->regul_count = ARRAY_SIZE(supply);
2230 2236
2237 res->reg_hdmi_en = devm_regulator_get(dev, "hdmi-en");
2238 if (IS_ERR(res->reg_hdmi_en) && PTR_ERR(res->reg_hdmi_en) != -ENOENT) {
2239 DRM_ERROR("failed to get hdmi-en regulator\n");
2240 return PTR_ERR(res->reg_hdmi_en);
2241 }
2242 if (!IS_ERR(res->reg_hdmi_en)) {
2243 ret = regulator_enable(res->reg_hdmi_en);
2244 if (ret) {
2245 DRM_ERROR("failed to enable hdmi-en regulator\n");
2246 return ret;
2247 }
2248 } else
2249 res->reg_hdmi_en = NULL;
2250
2231 return ret; 2251 return ret;
2232fail: 2252fail:
2233 DRM_ERROR("HDMI resource init - failed\n"); 2253 DRM_ERROR("HDMI resource init - failed\n");
@@ -2263,6 +2283,9 @@ static struct of_device_id hdmi_match_types[] = {
2263 .compatible = "samsung,exynos5-hdmi", 2283 .compatible = "samsung,exynos5-hdmi",
2264 .data = &exynos5_hdmi_driver_data, 2284 .data = &exynos5_hdmi_driver_data,
2265 }, { 2285 }, {
2286 .compatible = "samsung,exynos4210-hdmi",
2287 .data = &exynos4210_hdmi_driver_data,
2288 }, {
2266 .compatible = "samsung,exynos4212-hdmi", 2289 .compatible = "samsung,exynos4212-hdmi",
2267 .data = &exynos4212_hdmi_driver_data, 2290 .data = &exynos4212_hdmi_driver_data,
2268 }, { 2291 }, {
@@ -2272,6 +2295,7 @@ static struct of_device_id hdmi_match_types[] = {
2272 /* end node */ 2295 /* end node */
2273 } 2296 }
2274}; 2297};
 2298MODULE_DEVICE_TABLE(of, hdmi_match_types);
2275 2299
2276static int hdmi_bind(struct device *dev, struct device *master, void *data) 2300static int hdmi_bind(struct device *dev, struct device *master, void *data)
2277{ 2301{
@@ -2494,7 +2518,11 @@ static int hdmi_remove(struct platform_device *pdev)
2494 2518
2495 cancel_delayed_work_sync(&hdata->hotplug_work); 2519 cancel_delayed_work_sync(&hdata->hotplug_work);
2496 2520
2497 put_device(&hdata->hdmiphy_port->dev); 2521 if (hdata->res.reg_hdmi_en)
2522 regulator_disable(hdata->res.reg_hdmi_en);
2523
2524 if (hdata->hdmiphy_port)
2525 put_device(&hdata->hdmiphy_port->dev);
2498 put_device(&hdata->ddc_adpt->dev); 2526 put_device(&hdata->ddc_adpt->dev);
2499 2527
2500 pm_runtime_disable(&pdev->dev); 2528 pm_runtime_disable(&pdev->dev);
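The hdmi-en hunks above implement the optional-supply pattern: -ENOENT from devm_regulator_get() means the supply simply is not described and is tolerated (the pointer is NULLed so the enable/disable sites can skip it), while any other error still fails resource init; the regulator API also offers devm_regulator_get_optional() for this situation. A condensed kernel-style sketch:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int demo_get_enable_supply(struct device *dev, struct regulator **reg)
{
        *reg = devm_regulator_get(dev, "hdmi-en");
        if (IS_ERR(*reg)) {
                if (PTR_ERR(*reg) != -ENOENT)
                        return PTR_ERR(*reg); /* a real error */
                *reg = NULL;                  /* absent: skip en/disable */
                return 0;
        }
        return regulator_enable(*reg);
}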
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7529946d0a74..e8b4ec84b312 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -76,7 +76,7 @@ struct mixer_resources {
76 struct clk *vp; 76 struct clk *vp;
77 struct clk *sclk_mixer; 77 struct clk *sclk_mixer;
78 struct clk *sclk_hdmi; 78 struct clk *sclk_hdmi;
79 struct clk *sclk_dac; 79 struct clk *mout_mixer;
80}; 80};
81 81
82enum mixer_version_id { 82enum mixer_version_id {
@@ -93,6 +93,7 @@ struct mixer_context {
93 bool interlace; 93 bool interlace;
94 bool powered; 94 bool powered;
95 bool vp_enabled; 95 bool vp_enabled;
96 bool has_sclk;
96 u32 int_en; 97 u32 int_en;
97 98
98 struct mutex mixer_mutex; 99 struct mutex mixer_mutex;
@@ -106,6 +107,7 @@ struct mixer_context {
106struct mixer_drv_data { 107struct mixer_drv_data {
107 enum mixer_version_id version; 108 enum mixer_version_id version;
108 bool is_vp_enabled; 109 bool is_vp_enabled;
110 bool has_sclk;
109}; 111};
110 112
111static const u8 filter_y_horiz_tap8[] = { 113static const u8 filter_y_horiz_tap8[] = {
@@ -363,6 +365,11 @@ static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
363 vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); 365 vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
364 mixer_reg_writemask(res, MXR_CFG, val, 366 mixer_reg_writemask(res, MXR_CFG, val,
365 MXR_CFG_VP_ENABLE); 367 MXR_CFG_VP_ENABLE);
368
369 /* control blending of graphic layer 0 */
370 mixer_reg_writemask(res, MXR_GRAPHIC_CFG(0), val,
371 MXR_GRP_CFG_BLEND_PRE_MUL |
372 MXR_GRP_CFG_PIXEL_BLEND_EN);
366 } 373 }
367 break; 374 break;
368 } 375 }
@@ -809,19 +816,23 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
809 dev_err(dev, "failed to get clock 'vp'\n"); 816 dev_err(dev, "failed to get clock 'vp'\n");
810 return -ENODEV; 817 return -ENODEV;
811 } 818 }
812 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
813 if (IS_ERR(mixer_res->sclk_mixer)) {
814 dev_err(dev, "failed to get clock 'sclk_mixer'\n");
815 return -ENODEV;
816 }
817 mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
818 if (IS_ERR(mixer_res->sclk_dac)) {
819 dev_err(dev, "failed to get clock 'sclk_dac'\n");
820 return -ENODEV;
821 }
822 819
823 if (mixer_res->sclk_hdmi) 820 if (mixer_ctx->has_sclk) {
824 clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi); 821 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
822 if (IS_ERR(mixer_res->sclk_mixer)) {
823 dev_err(dev, "failed to get clock 'sclk_mixer'\n");
824 return -ENODEV;
825 }
826 mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer");
827 if (IS_ERR(mixer_res->mout_mixer)) {
828 dev_err(dev, "failed to get clock 'mout_mixer'\n");
829 return -ENODEV;
830 }
831
832 if (mixer_res->sclk_hdmi && mixer_res->mout_mixer)
833 clk_set_parent(mixer_res->mout_mixer,
834 mixer_res->sclk_hdmi);
835 }
825 836
826 res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1); 837 res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
827 if (res == NULL) { 838 if (res == NULL) {
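Only the exynos4210 mixer carries the extra sclk_mixer/mout_mixer clocks, so every get, reparent, prepare and unprepare site is now gated on the has_sclk flag from the per-SoC driver data instead of failing on parts that lack them. A kernel-style sketch; struct demo_ctx and its fields are illustrative:

#include <linux/clk.h>
#include <linux/err.h>

struct demo_ctx {
        bool has_sclk;
        struct clk *sclk_mixer, *mout_mixer, *sclk_hdmi;
};

static int demo_optional_clocks_init(struct demo_ctx *ctx, struct device *dev)
{
        if (!ctx->has_sclk)
                return 0; /* SoC without the extra clocks */

        ctx->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
        if (IS_ERR(ctx->sclk_mixer))
                return PTR_ERR(ctx->sclk_mixer);

        ctx->mout_mixer = devm_clk_get(dev, "mout_mixer");
        if (IS_ERR(ctx->mout_mixer))
                return PTR_ERR(ctx->mout_mixer);

        /* Route the HDMI clock into the mixer mux. */
        return clk_set_parent(ctx->mout_mixer, ctx->sclk_hdmi);
}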
@@ -1082,7 +1093,8 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
1082 clk_prepare_enable(res->mixer); 1093 clk_prepare_enable(res->mixer);
1083 if (ctx->vp_enabled) { 1094 if (ctx->vp_enabled) {
1084 clk_prepare_enable(res->vp); 1095 clk_prepare_enable(res->vp);
1085 clk_prepare_enable(res->sclk_mixer); 1096 if (ctx->has_sclk)
1097 clk_prepare_enable(res->sclk_mixer);
1086 } 1098 }
1087 1099
1088 mutex_lock(&ctx->mixer_mutex); 1100 mutex_lock(&ctx->mixer_mutex);
@@ -1121,7 +1133,8 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
1121 clk_disable_unprepare(res->mixer); 1133 clk_disable_unprepare(res->mixer);
1122 if (ctx->vp_enabled) { 1134 if (ctx->vp_enabled) {
1123 clk_disable_unprepare(res->vp); 1135 clk_disable_unprepare(res->vp);
1124 clk_disable_unprepare(res->sclk_mixer); 1136 if (ctx->has_sclk)
1137 clk_disable_unprepare(res->sclk_mixer);
1125 } 1138 }
1126 1139
1127 pm_runtime_put_sync(ctx->dev); 1140 pm_runtime_put_sync(ctx->dev);
@@ -1189,9 +1202,15 @@ static struct mixer_drv_data exynos5250_mxr_drv_data = {
1189 .is_vp_enabled = 0, 1202 .is_vp_enabled = 0,
1190}; 1203};
1191 1204
1205static struct mixer_drv_data exynos4212_mxr_drv_data = {
1206 .version = MXR_VER_0_0_0_16,
1207 .is_vp_enabled = 1,
1208};
1209
1192static struct mixer_drv_data exynos4210_mxr_drv_data = { 1210static struct mixer_drv_data exynos4210_mxr_drv_data = {
1193 .version = MXR_VER_0_0_0_16, 1211 .version = MXR_VER_0_0_0_16,
1194 .is_vp_enabled = 1, 1212 .is_vp_enabled = 1,
1213 .has_sclk = 1,
1195}; 1214};
1196 1215
1197static struct platform_device_id mixer_driver_types[] = { 1216static struct platform_device_id mixer_driver_types[] = {
@@ -1208,6 +1227,12 @@ static struct platform_device_id mixer_driver_types[] = {
1208 1227
1209static struct of_device_id mixer_match_types[] = { 1228static struct of_device_id mixer_match_types[] = {
1210 { 1229 {
1230 .compatible = "samsung,exynos4210-mixer",
1231 .data = &exynos4210_mxr_drv_data,
1232 }, {
1233 .compatible = "samsung,exynos4212-mixer",
1234 .data = &exynos4212_mxr_drv_data,
1235 }, {
1211 .compatible = "samsung,exynos5-mixer", 1236 .compatible = "samsung,exynos5-mixer",
1212 .data = &exynos5250_mxr_drv_data, 1237 .data = &exynos5250_mxr_drv_data,
1213 }, { 1238 }, {
@@ -1220,6 +1245,7 @@ static struct of_device_id mixer_match_types[] = {
1220 /* end node */ 1245 /* end node */
1221 } 1246 }
1222}; 1247};
1248MODULE_DEVICE_TABLE(of, mixer_match_types);
1223 1249
1224static int mixer_bind(struct device *dev, struct device *manager, void *data) 1250static int mixer_bind(struct device *dev, struct device *manager, void *data)
1225{ 1251{
@@ -1251,6 +1277,7 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
1251 ctx->pdev = pdev; 1277 ctx->pdev = pdev;
1252 ctx->dev = dev; 1278 ctx->dev = dev;
1253 ctx->vp_enabled = drv->is_vp_enabled; 1279 ctx->vp_enabled = drv->is_vp_enabled;
1280 ctx->has_sclk = drv->has_sclk;
1254 ctx->mxr_ver = drv->version; 1281 ctx->mxr_ver = drv->version;
1255 init_waitqueue_head(&ctx->wait_vsync_queue); 1282 init_waitqueue_head(&ctx->wait_vsync_queue);
1256 atomic_set(&ctx->wait_vsync_event, 0); 1283 atomic_set(&ctx->wait_vsync_event, 0);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index c18268cd516e..248c33a35ebf 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -192,7 +192,7 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
192 struct gma_encoder *gma_encoder = gma_attached_encoder(connector); 192 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
193 193
194 psb_intel_i2c_destroy(gma_encoder->ddc_bus); 194 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
195 drm_sysfs_connector_remove(connector); 195 drm_connector_unregister(connector);
196 drm_connector_cleanup(connector); 196 drm_connector_cleanup(connector);
197 kfree(connector); 197 kfree(connector);
198} 198}
@@ -304,7 +304,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
304 drm_connector_helper_add(connector, 304 drm_connector_helper_add(connector,
305 &cdv_intel_crt_connector_helper_funcs); 305 &cdv_intel_crt_connector_helper_funcs);
306 306
307 drm_sysfs_connector_add(connector); 307 drm_connector_register(connector);
308 308
309 return; 309 return;
310failed_ddc: 310failed_ddc:
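All of the gma500 hunks in this and the following files are the same mechanical rename: drm_sysfs_connector_add()/_remove() became drm_connector_register()/_unregister() in the DRM core, with the register/unregister pair still bracketing the connector's userspace visibility. The lifecycle, sketched; demo_connector_funcs is assumed to be populated elsewhere:

static const struct drm_connector_funcs demo_connector_funcs; /* assumed */

static void demo_connector_setup(struct drm_device *dev,
                                 struct drm_connector *connector)
{
        drm_connector_init(dev, connector, &demo_connector_funcs,
                           DRM_MODE_CONNECTOR_VGA);
        drm_connector_register(connector);   /* was drm_sysfs_connector_add() */
}

static void demo_connector_teardown(struct drm_connector *connector)
{
        drm_connector_unregister(connector); /* was drm_sysfs_connector_remove() */
        drm_connector_cleanup(connector);
}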
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 9ff30c2efadb..a4cc0e60a1be 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1713,7 +1713,7 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
1713 } 1713 }
1714 } 1714 }
1715 i2c_del_adapter(&intel_dp->adapter); 1715 i2c_del_adapter(&intel_dp->adapter);
1716 drm_sysfs_connector_remove(connector); 1716 drm_connector_unregister(connector);
1717 drm_connector_cleanup(connector); 1717 drm_connector_cleanup(connector);
1718 kfree(connector); 1718 kfree(connector);
1719} 1719}
@@ -1847,7 +1847,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
1847 connector->interlace_allowed = false; 1847 connector->interlace_allowed = false;
1848 connector->doublescan_allowed = false; 1848 connector->doublescan_allowed = false;
1849 1849
1850 drm_sysfs_connector_add(connector); 1850 drm_connector_register(connector);
1851 1851
1852 /* Set up the DDC bus. */ 1852 /* Set up the DDC bus. */
1853 switch (output_reg) { 1853 switch (output_reg) {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index b99084b3f706..4268bf210034 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -248,7 +248,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
248 248
249 if (gma_encoder->i2c_bus) 249 if (gma_encoder->i2c_bus)
250 psb_intel_i2c_destroy(gma_encoder->i2c_bus); 250 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
251 drm_sysfs_connector_remove(connector); 251 drm_connector_unregister(connector);
252 drm_connector_cleanup(connector); 252 drm_connector_cleanup(connector);
253 kfree(connector); 253 kfree(connector);
254} 254}
@@ -356,7 +356,7 @@ void cdv_hdmi_init(struct drm_device *dev,
356 356
357 hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter); 357 hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
358 hdmi_priv->dev = dev; 358 hdmi_priv->dev = dev;
359 drm_sysfs_connector_add(connector); 359 drm_connector_register(connector);
360 return; 360 return;
361 361
362failed_ddc: 362failed_ddc:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8ecc920fc26d..0b770396548c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -446,7 +446,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector)
446 446
447 if (gma_encoder->i2c_bus) 447 if (gma_encoder->i2c_bus)
448 psb_intel_i2c_destroy(gma_encoder->i2c_bus); 448 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
449 drm_sysfs_connector_remove(connector); 449 drm_connector_unregister(connector);
450 drm_connector_cleanup(connector); 450 drm_connector_cleanup(connector);
451 kfree(connector); 451 kfree(connector);
452} 452}
@@ -774,7 +774,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
774 774
775out: 775out:
776 mutex_unlock(&dev->mode_config.mutex); 776 mutex_unlock(&dev->mode_config.mutex);
777 drm_sysfs_connector_add(connector); 777 drm_connector_register(connector);
778 return; 778 return;
779 779
780failed_find: 780failed_find:
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index e7fcc148f333..d0dd3bea8aa5 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -561,7 +561,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
561 return psbfb_create(psb_fbdev, sizes); 561 return psbfb_create(psb_fbdev, sizes);
562} 562}
563 563
564static struct drm_fb_helper_funcs psb_fb_helper_funcs = { 564static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
565 .gamma_set = psbfb_gamma_set, 565 .gamma_set = psbfb_gamma_set,
566 .gamma_get = psbfb_gamma_get, 566 .gamma_get = psbfb_gamma_get,
567 .fb_probe = psbfb_probe, 567 .fb_probe = psbfb_probe,
@@ -600,7 +600,8 @@ int psb_fbdev_init(struct drm_device *dev)
600 } 600 }
601 601
602 dev_priv->fbdev = fbdev; 602 dev_priv->fbdev = fbdev;
603 fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs; 603
604 drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
604 605
605 drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs, 606 drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
606 INTELFB_CONN_LIMIT); 607 INTELFB_CONN_LIMIT);
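drm_fb_helper_prepare() is the new single entry point that initializes the helper and attaches the funcs table, which is also what allows the table to become const. A sketch of the updated init sequence with illustrative demo_* names:

static int demo_fb_probe(struct drm_fb_helper *helper,
                         struct drm_fb_helper_surface_size *sizes);

static const struct drm_fb_helper_funcs demo_fb_helper_funcs = {
        .fb_probe = demo_fb_probe, /* hypothetical callback */
};

static int demo_fbdev_init(struct drm_device *dev,
                           struct drm_fb_helper *helper,
                           int num_crtcs, int max_conn)
{
        drm_fb_helper_prepare(dev, helper, &demo_fb_helper_funcs);
        return drm_fb_helper_init(dev, helper, num_crtcs, max_conn);
}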
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 592d205a0089..ce015db59dc6 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
206 206
207 WARN_ON(gt->pages); 207 WARN_ON(gt->pages);
208 208
209 pages = drm_gem_get_pages(&gt->gem, 0); 209 pages = drm_gem_get_pages(&gt->gem);
210 if (IS_ERR(pages)) 210 if (IS_ERR(pages))
211 return PTR_ERR(pages); 211 return PTR_ERR(pages);
212 212
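drm_gem_get_pages() lost its gfp argument; the allocation mask now lives on the object's backing shmem mapping and is configured once at object-creation time. A kernel-style sketch of where the mask moved:

#include <drm/drmP.h>

static struct page **demo_attach_pages(struct drm_gem_object *obj)
{
        /* The mask was set once at creation, e.g.:
         *   mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
         *                        GFP_HIGHUSER | __GFP_RECLAIMABLE);
         * so call sites no longer pass per-call flags. */
        return drm_gem_get_pages(obj);
}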
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 6e91b20ce2e5..abf2248da61e 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -318,7 +318,7 @@ static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
318 318
319 if (!dsi_connector) 319 if (!dsi_connector)
320 return; 320 return;
321 drm_sysfs_connector_remove(connector); 321 drm_connector_unregister(connector);
322 drm_connector_cleanup(connector); 322 drm_connector_cleanup(connector);
323 sender = dsi_connector->pkg_sender; 323 sender = dsi_connector->pkg_sender;
324 mdfld_dsi_pkg_sender_destroy(sender); 324 mdfld_dsi_pkg_sender_destroy(sender);
@@ -597,7 +597,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
597 dsi_config->encoder = encoder; 597 dsi_config->encoder = encoder;
598 encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI : 598 encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
599 INTEL_OUTPUT_MIPI2; 599 INTEL_OUTPUT_MIPI2;
600 drm_sysfs_connector_add(connector); 600 drm_connector_register(connector);
601 return; 601 return;
602 602
603 /*TODO: add code to destroy outputs on error*/ 603 /*TODO: add code to destroy outputs on error*/
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index cf018ddcc5a6..e6f5c620a0a2 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -665,7 +665,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
665 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 665 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
666 connector->interlace_allowed = false; 666 connector->interlace_allowed = false;
667 connector->doublescan_allowed = false; 667 connector->doublescan_allowed = false;
668 drm_sysfs_connector_add(connector); 668 drm_connector_register(connector);
669 dev_info(dev->dev, "HDMI initialised.\n"); 669 dev_info(dev->dev, "HDMI initialised.\n");
670 670
671 return; 671 return;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 9b099468a5db..0d39da6e8b7a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -404,7 +404,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
404out: 404out:
405 mutex_unlock(&dev->mode_config.mutex); 405 mutex_unlock(&dev->mode_config.mutex);
406 406
407 drm_sysfs_connector_add(connector); 407 drm_connector_register(connector);
408 return; 408 return;
409 409
410failed_find: 410failed_find:
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index d7778d0472c1..88aad95bde09 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -563,7 +563,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector)
563 563
564 if (lvds_priv->ddc_bus) 564 if (lvds_priv->ddc_bus)
565 psb_intel_i2c_destroy(lvds_priv->ddc_bus); 565 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
566 drm_sysfs_connector_remove(connector); 566 drm_connector_unregister(connector);
567 drm_connector_cleanup(connector); 567 drm_connector_cleanup(connector);
568 kfree(connector); 568 kfree(connector);
569} 569}
@@ -829,7 +829,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
829 */ 829 */
830out: 830out:
831 mutex_unlock(&dev->mode_config.mutex); 831 mutex_unlock(&dev->mode_config.mutex);
832 drm_sysfs_connector_add(connector); 832 drm_connector_register(connector);
833 return; 833 return;
834 834
835failed_find: 835failed_find:
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index deeb0829b129..0be96fdb5e28 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1682,7 +1682,7 @@ static void psb_intel_sdvo_destroy(struct drm_connector *connector)
1682 psb_intel_sdvo_connector->tv_format); 1682 psb_intel_sdvo_connector->tv_format);
1683 1683
1684 psb_intel_sdvo_destroy_enhance_property(connector); 1684 psb_intel_sdvo_destroy_enhance_property(connector);
1685 drm_sysfs_connector_remove(connector); 1685 drm_connector_unregister(connector);
1686 drm_connector_cleanup(connector); 1686 drm_connector_cleanup(connector);
1687 kfree(connector); 1687 kfree(connector);
1688} 1688}
@@ -2071,7 +2071,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
2071 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2071 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2072 2072
2073 gma_connector_attach_encoder(&connector->base, &encoder->base); 2073 gma_connector_attach_encoder(&connector->base, &encoder->base);
2074 drm_sysfs_connector_add(&connector->base.base); 2074 drm_connector_register(&connector->base.base);
2075} 2075}
2076 2076
2077static void 2077static void
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index ac357b02bd35..d4762799351d 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -15,8 +15,7 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18 18#include <linux/component.h>
19
20#include <linux/hdmi.h> 19#include <linux/hdmi.h>
21#include <linux/module.h> 20#include <linux/module.h>
22#include <linux/irq.h> 21#include <linux/irq.h>
@@ -730,12 +729,9 @@ tda998x_configure_audio(struct tda998x_priv *priv,
730 729
731/* DRM encoder functions */ 730/* DRM encoder functions */
732 731
733static void 732static void tda998x_encoder_set_config(struct tda998x_priv *priv,
734tda998x_encoder_set_config(struct drm_encoder *encoder, void *params) 733 const struct tda998x_encoder_params *p)
735{ 734{
736 struct tda998x_priv *priv = to_tda998x_priv(encoder);
737 struct tda998x_encoder_params *p = params;
738
739 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) | 735 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
740 (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) | 736 (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
741 VIP_CNTRL_0_SWAP_B(p->swap_b) | 737 VIP_CNTRL_0_SWAP_B(p->swap_b) |
@@ -752,11 +748,8 @@ tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
752 priv->params = *p; 748 priv->params = *p;
753} 749}
754 750
755static void 751static void tda998x_encoder_dpms(struct tda998x_priv *priv, int mode)
756tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
757{ 752{
758 struct tda998x_priv *priv = to_tda998x_priv(encoder);
759
760 /* we only care about on or off: */ 753 /* we only care about on or off: */
761 if (mode != DRM_MODE_DPMS_ON) 754 if (mode != DRM_MODE_DPMS_ON)
762 mode = DRM_MODE_DPMS_OFF; 755 mode = DRM_MODE_DPMS_OFF;
@@ -806,9 +799,8 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
806 return true; 799 return true;
807} 800}
808 801
809static int 802static int tda998x_encoder_mode_valid(struct tda998x_priv *priv,
810tda998x_encoder_mode_valid(struct drm_encoder *encoder, 803 struct drm_display_mode *mode)
811 struct drm_display_mode *mode)
812{ 804{
813 if (mode->clock > 150000) 805 if (mode->clock > 150000)
814 return MODE_CLOCK_HIGH; 806 return MODE_CLOCK_HIGH;
@@ -820,11 +812,10 @@ tda998x_encoder_mode_valid(struct drm_encoder *encoder,
820} 812}
821 813
822static void 814static void
823tda998x_encoder_mode_set(struct drm_encoder *encoder, 815tda998x_encoder_mode_set(struct tda998x_priv *priv,
824 struct drm_display_mode *mode, 816 struct drm_display_mode *mode,
825 struct drm_display_mode *adjusted_mode) 817 struct drm_display_mode *adjusted_mode)
826{ 818{
827 struct tda998x_priv *priv = to_tda998x_priv(encoder);
828 uint16_t ref_pix, ref_line, n_pix, n_line; 819 uint16_t ref_pix, ref_line, n_pix, n_line;
829 uint16_t hs_pix_s, hs_pix_e; 820 uint16_t hs_pix_s, hs_pix_e;
830 uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e; 821 uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
@@ -1012,20 +1003,16 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
1012} 1003}
1013 1004
1014static enum drm_connector_status 1005static enum drm_connector_status
1015tda998x_encoder_detect(struct drm_encoder *encoder, 1006tda998x_encoder_detect(struct tda998x_priv *priv)
1016 struct drm_connector *connector)
1017{ 1007{
1018 struct tda998x_priv *priv = to_tda998x_priv(encoder);
1019 uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV); 1008 uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV);
1020 1009
1021 return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected : 1010 return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
1022 connector_status_disconnected; 1011 connector_status_disconnected;
1023} 1012}
1024 1013
1025static int 1014static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
1026read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
1027{ 1015{
1028 struct tda998x_priv *priv = to_tda998x_priv(encoder);
1029 uint8_t offset, segptr; 1016 uint8_t offset, segptr;
1030 int ret, i; 1017 int ret, i;
1031 1018
@@ -1079,10 +1066,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
1079 return 0; 1066 return 0;
1080} 1067}
1081 1068
1082static uint8_t * 1069static uint8_t *do_get_edid(struct tda998x_priv *priv)
1083do_get_edid(struct drm_encoder *encoder)
1084{ 1070{
1085 struct tda998x_priv *priv = to_tda998x_priv(encoder);
1086 int j, valid_extensions = 0; 1071 int j, valid_extensions = 0;
1087 uint8_t *block, *new; 1072 uint8_t *block, *new;
1088 bool print_bad_edid = drm_debug & DRM_UT_KMS; 1073 bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -1094,7 +1079,7 @@ do_get_edid(struct drm_encoder *encoder)
1094 reg_clear(priv, REG_TX4, TX4_PD_RAM); 1079 reg_clear(priv, REG_TX4, TX4_PD_RAM);
1095 1080
1096 /* base block fetch */ 1081 /* base block fetch */
1097 if (read_edid_block(encoder, block, 0)) 1082 if (read_edid_block(priv, block, 0))
1098 goto fail; 1083 goto fail;
1099 1084
1100 if (!drm_edid_block_valid(block, 0, print_bad_edid)) 1085 if (!drm_edid_block_valid(block, 0, print_bad_edid))
@@ -1111,7 +1096,7 @@ do_get_edid(struct drm_encoder *encoder)
1111 1096
1112 for (j = 1; j <= block[0x7e]; j++) { 1097 for (j = 1; j <= block[0x7e]; j++) {
1113 uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH; 1098 uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
1114 if (read_edid_block(encoder, ext_block, j)) 1099 if (read_edid_block(priv, ext_block, j))
1115 goto fail; 1100 goto fail;
1116 1101
1117 if (!drm_edid_block_valid(ext_block, j, print_bad_edid)) 1102 if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
@@ -1144,11 +1129,10 @@ fail:
1144} 1129}
1145 1130
1146static int 1131static int
1147tda998x_encoder_get_modes(struct drm_encoder *encoder, 1132tda998x_encoder_get_modes(struct tda998x_priv *priv,
1148 struct drm_connector *connector) 1133 struct drm_connector *connector)
1149{ 1134{
1150 struct tda998x_priv *priv = to_tda998x_priv(encoder); 1135 struct edid *edid = (struct edid *)do_get_edid(priv);
1151 struct edid *edid = (struct edid *)do_get_edid(encoder);
1152 int n = 0; 1136 int n = 0;
1153 1137
1154 if (edid) { 1138 if (edid) {
@@ -1161,18 +1145,14 @@ tda998x_encoder_get_modes(struct drm_encoder *encoder,
1161 return n; 1145 return n;
1162} 1146}
1163 1147
1164static int 1148static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
1165tda998x_encoder_create_resources(struct drm_encoder *encoder, 1149 struct drm_connector *connector)
1166 struct drm_connector *connector)
1167{ 1150{
1168 struct tda998x_priv *priv = to_tda998x_priv(encoder);
1169
1170 if (priv->hdmi->irq) 1151 if (priv->hdmi->irq)
1171 connector->polled = DRM_CONNECTOR_POLL_HPD; 1152 connector->polled = DRM_CONNECTOR_POLL_HPD;
1172 else 1153 else
1173 connector->polled = DRM_CONNECTOR_POLL_CONNECT | 1154 connector->polled = DRM_CONNECTOR_POLL_CONNECT |
1174 DRM_CONNECTOR_POLL_DISCONNECT; 1155 DRM_CONNECTOR_POLL_DISCONNECT;
1175 return 0;
1176} 1156}
1177 1157
1178static int 1158static int
@@ -1185,66 +1165,97 @@ tda998x_encoder_set_property(struct drm_encoder *encoder,
1185 return 0; 1165 return 0;
1186} 1166}
1187 1167
1188static void 1168static void tda998x_destroy(struct tda998x_priv *priv)
1189tda998x_encoder_destroy(struct drm_encoder *encoder)
1190{ 1169{
1191 struct tda998x_priv *priv = to_tda998x_priv(encoder);
1192
1193 /* disable all IRQs and free the IRQ handler */ 1170 /* disable all IRQs and free the IRQ handler */
1194 cec_write(priv, REG_CEC_RXSHPDINTENA, 0); 1171 cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
1195 reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); 1172 reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
1196 if (priv->hdmi->irq) 1173 if (priv->hdmi->irq)
1197 free_irq(priv->hdmi->irq, priv); 1174 free_irq(priv->hdmi->irq, priv);
1198 1175
1199 if (priv->cec) 1176 i2c_unregister_device(priv->cec);
1200 i2c_unregister_device(priv->cec); 1177}
1178
1179/* Slave encoder support */
1180
1181static void
1182tda998x_encoder_slave_set_config(struct drm_encoder *encoder, void *params)
1183{
1184 tda998x_encoder_set_config(to_tda998x_priv(encoder), params);
1185}
1186
1187static void tda998x_encoder_slave_destroy(struct drm_encoder *encoder)
1188{
1189 struct tda998x_priv *priv = to_tda998x_priv(encoder);
1190
1191 tda998x_destroy(priv);
1201 drm_i2c_encoder_destroy(encoder); 1192 drm_i2c_encoder_destroy(encoder);
1202 kfree(priv); 1193 kfree(priv);
1203} 1194}
1204 1195
1205static struct drm_encoder_slave_funcs tda998x_encoder_funcs = { 1196static void tda998x_encoder_slave_dpms(struct drm_encoder *encoder, int mode)
1206 .set_config = tda998x_encoder_set_config, 1197{
1207 .destroy = tda998x_encoder_destroy, 1198 tda998x_encoder_dpms(to_tda998x_priv(encoder), mode);
1208 .dpms = tda998x_encoder_dpms, 1199}
1209 .save = tda998x_encoder_save,
1210 .restore = tda998x_encoder_restore,
1211 .mode_fixup = tda998x_encoder_mode_fixup,
1212 .mode_valid = tda998x_encoder_mode_valid,
1213 .mode_set = tda998x_encoder_mode_set,
1214 .detect = tda998x_encoder_detect,
1215 .get_modes = tda998x_encoder_get_modes,
1216 .create_resources = tda998x_encoder_create_resources,
1217 .set_property = tda998x_encoder_set_property,
1218};
1219 1200
1220/* I2C driver functions */ 1201static int tda998x_encoder_slave_mode_valid(struct drm_encoder *encoder,
1202 struct drm_display_mode *mode)
1203{
1204 return tda998x_encoder_mode_valid(to_tda998x_priv(encoder), mode);
1205}
1221 1206
1222static int 1207static void
1223tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id) 1208tda998x_encoder_slave_mode_set(struct drm_encoder *encoder,
1209 struct drm_display_mode *mode,
1210 struct drm_display_mode *adjusted_mode)
1224{ 1211{
1225 return 0; 1212 tda998x_encoder_mode_set(to_tda998x_priv(encoder), mode, adjusted_mode);
1213}
1214
1215static enum drm_connector_status
1216tda998x_encoder_slave_detect(struct drm_encoder *encoder,
1217 struct drm_connector *connector)
1218{
1219 return tda998x_encoder_detect(to_tda998x_priv(encoder));
1220}
1221
1222static int tda998x_encoder_slave_get_modes(struct drm_encoder *encoder,
1223 struct drm_connector *connector)
1224{
1225 return tda998x_encoder_get_modes(to_tda998x_priv(encoder), connector);
1226} 1226}
1227 1227
1228static int 1228static int
1229tda998x_remove(struct i2c_client *client) 1229tda998x_encoder_slave_create_resources(struct drm_encoder *encoder,
1230 struct drm_connector *connector)
1230{ 1231{
1232 tda998x_encoder_set_polling(to_tda998x_priv(encoder), connector);
1231 return 0; 1233 return 0;
1232} 1234}
1233 1235
1234static int 1236static struct drm_encoder_slave_funcs tda998x_encoder_slave_funcs = {
1235tda998x_encoder_init(struct i2c_client *client, 1237 .set_config = tda998x_encoder_slave_set_config,
1236 struct drm_device *dev, 1238 .destroy = tda998x_encoder_slave_destroy,
1237 struct drm_encoder_slave *encoder_slave) 1239 .dpms = tda998x_encoder_slave_dpms,
1240 .save = tda998x_encoder_save,
1241 .restore = tda998x_encoder_restore,
1242 .mode_fixup = tda998x_encoder_mode_fixup,
1243 .mode_valid = tda998x_encoder_slave_mode_valid,
1244 .mode_set = tda998x_encoder_slave_mode_set,
1245 .detect = tda998x_encoder_slave_detect,
1246 .get_modes = tda998x_encoder_slave_get_modes,
1247 .create_resources = tda998x_encoder_slave_create_resources,
1248 .set_property = tda998x_encoder_set_property,
1249};
1250
1251/* I2C driver functions */
1252
1253static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
1238{ 1254{
1239 struct tda998x_priv *priv;
1240 struct device_node *np = client->dev.of_node; 1255 struct device_node *np = client->dev.of_node;
1241 u32 video; 1256 u32 video;
1242 int rev_lo, rev_hi, ret; 1257 int rev_lo, rev_hi, ret;
1243 1258
1244 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1245 if (!priv)
1246 return -ENOMEM;
1247
1248 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); 1259 priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
1249 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); 1260 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
1250 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); 1261 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
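These wrappers are the point of the hunk: every core operation now takes the tda998x_priv directly, and the drm_encoder_slave callbacks shrink to adapters that merely unwrap the encoder. That split is what lets the component path added below reuse the same core ops from a different container. The shape of the pattern, with demo_* as illustrative names:

#include <drm/drm_encoder_slave.h>

struct demo_priv { int dpms; };

#define to_demo_priv(x) ((struct demo_priv *)to_encoder_slave(x)->slave_priv)

/* Core op: independent of how the priv is embedded. */
static void demo_core_dpms(struct demo_priv *priv, int mode)
{
        priv->dpms = mode; /* stand-in for the real hardware programming */
}

/* Slave-encoder adapter: unwrap and delegate. */
static void demo_slave_dpms(struct drm_encoder *encoder, int mode)
{
        demo_core_dpms(to_demo_priv(encoder), mode);
}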
@@ -1252,17 +1263,11 @@ tda998x_encoder_init(struct i2c_client *client,
1252 priv->current_page = 0xff; 1263 priv->current_page = 0xff;
1253 priv->hdmi = client; 1264 priv->hdmi = client;
1254 priv->cec = i2c_new_dummy(client->adapter, 0x34); 1265 priv->cec = i2c_new_dummy(client->adapter, 0x34);
1255 if (!priv->cec) { 1266 if (!priv->cec)
1256 kfree(priv);
1257 return -ENODEV; 1267 return -ENODEV;
1258 }
1259 1268
1260 priv->encoder = &encoder_slave->base;
1261 priv->dpms = DRM_MODE_DPMS_OFF; 1269 priv->dpms = DRM_MODE_DPMS_OFF;
1262 1270
1263 encoder_slave->slave_priv = priv;
1264 encoder_slave->slave_funcs = &tda998x_encoder_funcs;
1265
1266 /* wake up the device: */ 1271 /* wake up the device: */
1267 cec_write(priv, REG_CEC_ENAMODS, 1272 cec_write(priv, REG_CEC_ENAMODS,
1268 CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); 1273 CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1365,12 +1370,231 @@ fail:
1365 */ 1370 */
1366 if (priv->cec) 1371 if (priv->cec)
1367 i2c_unregister_device(priv->cec); 1372 i2c_unregister_device(priv->cec);
1368 kfree(priv);
1369 encoder_slave->slave_priv = NULL;
1370 encoder_slave->slave_funcs = NULL;
1371 return -ENXIO; 1373 return -ENXIO;
1372} 1374}
1373 1375
1376static int tda998x_encoder_init(struct i2c_client *client,
1377 struct drm_device *dev,
1378 struct drm_encoder_slave *encoder_slave)
1379{
1380 struct tda998x_priv *priv;
1381 int ret;
1382
1383 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1384 if (!priv)
1385 return -ENOMEM;
1386
1387 priv->encoder = &encoder_slave->base;
1388
1389 ret = tda998x_create(client, priv);
1390 if (ret) {
1391 kfree(priv);
1392 return ret;
1393 }
1394
1395 encoder_slave->slave_priv = priv;
1396 encoder_slave->slave_funcs = &tda998x_encoder_slave_funcs;
1397
1398 return 0;
1399}
1400
1401struct tda998x_priv2 {
1402 struct tda998x_priv base;
1403 struct drm_encoder encoder;
1404 struct drm_connector connector;
1405};
1406
 1407#define conn_to_tda998x_priv2(x) \
 1408 container_of(x, struct tda998x_priv2, connector)
 1409
 1410#define enc_to_tda998x_priv2(x) \
 1411 container_of(x, struct tda998x_priv2, encoder)
1412
1413static void tda998x_encoder2_dpms(struct drm_encoder *encoder, int mode)
1414{
1415 struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
1416
1417 tda998x_encoder_dpms(&priv->base, mode);
1418}
1419
1420static void tda998x_encoder_prepare(struct drm_encoder *encoder)
1421{
1422 tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_OFF);
1423}
1424
1425static void tda998x_encoder_commit(struct drm_encoder *encoder)
1426{
1427 tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_ON);
1428}
1429
1430static void tda998x_encoder2_mode_set(struct drm_encoder *encoder,
1431 struct drm_display_mode *mode,
1432 struct drm_display_mode *adjusted_mode)
1433{
1434 struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
1435
1436 tda998x_encoder_mode_set(&priv->base, mode, adjusted_mode);
1437}
1438
1439static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
1440 .dpms = tda998x_encoder2_dpms,
1441 .save = tda998x_encoder_save,
1442 .restore = tda998x_encoder_restore,
1443 .mode_fixup = tda998x_encoder_mode_fixup,
1444 .prepare = tda998x_encoder_prepare,
1445 .commit = tda998x_encoder_commit,
1446 .mode_set = tda998x_encoder2_mode_set,
1447};
1448
1449static void tda998x_encoder_destroy(struct drm_encoder *encoder)
1450{
1451 struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
1452
1453 tda998x_destroy(&priv->base);
1454 drm_encoder_cleanup(encoder);
1455}
1456
1457static const struct drm_encoder_funcs tda998x_encoder_funcs = {
1458 .destroy = tda998x_encoder_destroy,
1459};
1460
1461static int tda998x_connector_get_modes(struct drm_connector *connector)
1462{
1463 struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
1464
1465 return tda998x_encoder_get_modes(&priv->base, connector);
1466}
1467
1468static int tda998x_connector_mode_valid(struct drm_connector *connector,
1469 struct drm_display_mode *mode)
1470{
1471 struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
1472
1473 return tda998x_encoder_mode_valid(&priv->base, mode);
1474}
1475
1476static struct drm_encoder *
1477tda998x_connector_best_encoder(struct drm_connector *connector)
1478{
1479 struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
1480
1481 return &priv->encoder;
1482}
1483
1484static
1485const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
1486 .get_modes = tda998x_connector_get_modes,
1487 .mode_valid = tda998x_connector_mode_valid,
1488 .best_encoder = tda998x_connector_best_encoder,
1489};
1490
1491static enum drm_connector_status
1492tda998x_connector_detect(struct drm_connector *connector, bool force)
1493{
1494 struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
1495
1496 return tda998x_encoder_detect(&priv->base);
1497}
1498
1499static void tda998x_connector_destroy(struct drm_connector *connector)
1500{
1501 drm_connector_unregister(connector);
1502 drm_connector_cleanup(connector);
1503}
1504
1505static const struct drm_connector_funcs tda998x_connector_funcs = {
1506 .dpms = drm_helper_connector_dpms,
1507 .fill_modes = drm_helper_probe_single_connector_modes,
1508 .detect = tda998x_connector_detect,
1509 .destroy = tda998x_connector_destroy,
1510};
1511
1512static int tda998x_bind(struct device *dev, struct device *master, void *data)
1513{
1514 struct tda998x_encoder_params *params = dev->platform_data;
1515 struct i2c_client *client = to_i2c_client(dev);
1516 struct drm_device *drm = data;
1517 struct tda998x_priv2 *priv;
1518 int ret;
1519
1520 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1521 if (!priv)
1522 return -ENOMEM;
1523
1524 dev_set_drvdata(dev, priv);
1525
1526 priv->base.encoder = &priv->encoder;
1527 priv->connector.interlace_allowed = 1;
1528 priv->encoder.possible_crtcs = 1 << 0;
1529
1530 ret = tda998x_create(client, &priv->base);
1531 if (ret)
1532 return ret;
1533
1534 if (!dev->of_node && params)
1535 tda998x_encoder_set_config(&priv->base, params);
1536
1537 tda998x_encoder_set_polling(&priv->base, &priv->connector);
1538
1539 drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
1540 ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
1541 DRM_MODE_ENCODER_TMDS);
1542 if (ret)
1543 goto err_encoder;
1544
1545 drm_connector_helper_add(&priv->connector,
1546 &tda998x_connector_helper_funcs);
1547 ret = drm_connector_init(drm, &priv->connector,
1548 &tda998x_connector_funcs,
1549 DRM_MODE_CONNECTOR_HDMIA);
1550 if (ret)
1551 goto err_connector;
1552
1553 ret = drm_connector_register(&priv->connector);
1554 if (ret)
1555 goto err_sysfs;
1556
1557 priv->connector.encoder = &priv->encoder;
1558 drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
1559
1560 return 0;
1561
1562err_sysfs:
1563 drm_connector_cleanup(&priv->connector);
1564err_connector:
1565 drm_encoder_cleanup(&priv->encoder);
1566err_encoder:
1567 tda998x_destroy(&priv->base);
1568 return ret;
1569}
1570
1571static void tda998x_unbind(struct device *dev, struct device *master,
1572 void *data)
1573{
1574 struct tda998x_priv2 *priv = dev_get_drvdata(dev);
1575
1576 drm_connector_cleanup(&priv->connector);
1577 drm_encoder_cleanup(&priv->encoder);
1578 tda998x_destroy(&priv->base);
1579}
1580
1581static const struct component_ops tda998x_ops = {
1582 .bind = tda998x_bind,
1583 .unbind = tda998x_unbind,
1584};
1585
1586static int
1587tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
1588{
1589 return component_add(&client->dev, &tda998x_ops);
1590}
1591
1592static int tda998x_remove(struct i2c_client *client)
1593{
1594 component_del(&client->dev, &tda998x_ops);
1595 return 0;
1596}
1597
1374#ifdef CONFIG_OF 1598#ifdef CONFIG_OF
1375static const struct of_device_id tda998x_dt_ids[] = { 1599static const struct of_device_id tda998x_dt_ids[] = {
1376 { .compatible = "nxp,tda998x", }, 1600 { .compatible = "nxp,tda998x", },
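After this refactor probe() and remove() only register the device as a component; the encoder and connector are created in .bind once the master DRM driver has gathered all of its pieces, and torn down again in .unbind. The minimal shape of that pattern, with demo_* stand-ins:

#include <linux/component.h>
#include <linux/i2c.h>

static int demo_bind(struct device *dev, struct device *master, void *data)
{
        /* data is the struct drm_device; create encoder + connector here */
        return 0;
}

static void demo_unbind(struct device *dev, struct device *master, void *data)
{
        /* tear the encoder + connector down again */
}

static const struct component_ops demo_component_ops = {
        .bind   = demo_bind,
        .unbind = demo_unbind,
};

static int demo_probe(struct i2c_client *client,
                      const struct i2c_device_id *id)
{
        return component_add(&client->dev, &demo_component_ops);
}

static int demo_remove(struct i2c_client *client)
{
        component_del(&client->dev, &demo_component_ops);
        return 0;
}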
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 437e1824d0bf..4e39ab34eb1c 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -69,15 +69,3 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
69 option changes the default for that module option. 69 option changes the default for that module option.
70 70
71 If in doubt, say "N". 71 If in doubt, say "N".
72
73config DRM_I915_UMS
74 bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
75 depends on DRM_I915 && BROKEN
76 default n
77 help
78 Choose this option if you still need userspace modesetting.
79
80 Userspace modesetting is deprecated for quite some time now, so
81 enable this only if you have ancient versions of the DDX drivers.
82
83 If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cad1683d8bb5..91bd167e1cb7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += dvo_ch7017.o \
59 intel_crt.o \ 59 intel_crt.o \
60 intel_ddi.o \ 60 intel_ddi.o \
61 intel_dp.o \ 61 intel_dp.o \
62 intel_dp_mst.o \
62 intel_dsi_cmd.o \ 63 intel_dsi_cmd.o \
63 intel_dsi.o \ 64 intel_dsi.o \
64 intel_dsi_pll.o \ 65 intel_dsi_pll.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 9d7954366bd2..dea99d92fb4a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = {
426 GEN7_SO_WRITE_OFFSET(1), 426 GEN7_SO_WRITE_OFFSET(1),
427 GEN7_SO_WRITE_OFFSET(2), 427 GEN7_SO_WRITE_OFFSET(2),
428 GEN7_SO_WRITE_OFFSET(3), 428 GEN7_SO_WRITE_OFFSET(3),
429 GEN7_L3SQCREG1,
430 GEN7_L3CNTLREG2,
431 GEN7_L3CNTLREG3,
429}; 432};
430 433
431static const u32 gen7_blt_regs[] = { 434static const u32 gen7_blt_regs[] = {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b8c689202c40..9e737b771c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -170,11 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	}
 	if (obj->ring != NULL)
 		seq_printf(m, " (%s)", obj->ring->name);
+	if (obj->frontbuffer_bits)
+		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
 
 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
 {
-	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 	seq_putc(m, ' ');
 }
@@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	unsigned long flags;
 	struct intel_crtc *crtc;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	for_each_intel_crtc(dev, crtc) {
 		const char pipe = pipe_name(crtc->pipe);
@@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -985,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 			i915_next_seqno_get, i915_next_seqno_set,
 			"0x%llx\n");
 
-static int i915_rstdby_delays(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 crstanddelay;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	crstanddelay = I915_READ16(CRSTANDVID);
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
-
-	return 0;
-}
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1029,7 +1015,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   MEMSTAT_VID_SHIFT);
 		seq_printf(m, "Current P-state: %d\n",
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
+		   IS_BROADWELL(dev)) {
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1048,7 +1035,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
 		reqf = I915_READ(GEN6_RPNSWREQ);
 		reqf &= ~GEN6_TURBO_DISABLE;
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 			reqf >>= 24;
 		else
 			reqf >>= 25;
@@ -1065,7 +1052,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
 		else
 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1121,20 +1108,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
 			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
 	} else if (IS_VALLEYVIEW(dev)) {
-		u32 freq_sts, val;
+		u32 freq_sts;
 
 		mutex_lock(&dev_priv->rps.hw_lock);
 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
-		val = valleyview_rps_max_freq(dev_priv);
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv, val));
+			   vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
 
-		val = valleyview_rps_min_freq(dev_priv);
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv, val));
+			   vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+
+		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+			   vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 
 		seq_printf(m, "current GPU freq: %d MHz\n",
 			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
@@ -1148,61 +1136,6 @@ out:
 	return ret;
 }
 
-static int i915_delayfreq_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 delayfreq;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 0; i < 16; i++) {
-		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
-			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static inline int MAP_TO_MV(int map)
-{
-	return 1250 - (map * 25);
-}
-
-static int i915_inttoext_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 inttoext;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 1; i <= 32; i++) {
-		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
-		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_info_node *node = m->private;
@@ -1513,10 +1446,17 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
-		seq_puts(m, "enabled\n");
-	else
-		seq_puts(m, "disabled\n");
+	seq_printf(m, "Enabled by kernel parameter: %s\n",
+		   yesno(i915.enable_ips));
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		seq_puts(m, "Currently: unknown\n");
+	} else {
+		if (I915_READ(IPS_CTL) & IPS_ENABLE)
+			seq_puts(m, "Currently: enabled\n");
+		else
+			seq_puts(m, "Currently: disabled\n");
+	}
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -1620,26 +1560,6 @@ out:
 	return ret;
 }
 
-static int i915_gfxec(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int i915_opregion(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1677,9 +1597,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 
 #ifdef CONFIG_DRM_I915_FBDEV
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
-	if (ret)
-		return ret;
 
 	ifbdev = dev_priv->fbdev;
 	fb = to_intel_framebuffer(ifbdev->helper.fb);
@@ -1692,7 +1609,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 		   atomic_read(&fb->base.refcount.refcount));
 	describe_obj(m, fb->obj);
 	seq_putc(m, '\n');
-	mutex_unlock(&dev->mode_config.mutex);
 #endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
@@ -1723,7 +1639,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct intel_context *ctx;
 	int ret, i;
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
@@ -1740,7 +1656,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	}
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		if (ctx->obj == NULL)
+		if (ctx->legacy_hw_ctx.rcs_state == NULL)
 			continue;
 
 		seq_puts(m, "HW context ");
@@ -1749,11 +1665,11 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			if (ring->default_context == ctx)
 				seq_printf(m, "(default context %s) ", ring->name);
 
-		describe_obj(m, ctx->obj);
+		describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
 		seq_putc(m, '\n');
 	}
 
-	mutex_unlock(&dev->mode_config.mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -1863,7 +1779,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
 	if (i915_gem_context_is_default(ctx))
 		seq_puts(m, " default context:\n");
 	else
-		seq_printf(m, " context %d:\n", ctx->id);
+		seq_printf(m, " context %d:\n", ctx->user_handle);
 	ppgtt->debug_dump(ppgtt, m);
 
 	return 0;
@@ -1976,17 +1892,25 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 	intel_runtime_pm_get(dev_priv);
 
+	mutex_lock(&dev_priv->psr.lock);
 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
+	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
+		   dev_priv->psr.busy_frontbuffer_bits);
+	seq_printf(m, "Re-enable work scheduled: %s\n",
+		   yesno(work_busy(&dev_priv->psr.work.work)));
 
 	enabled = HAS_PSR(dev) &&
 		  I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-	seq_printf(m, "Enabled: %s\n", yesno(enabled));
+	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
 
 	if (HAS_PSR(dev))
 		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
 			  EDP_PSR_PERF_CNT_MASK;
 	seq_printf(m, "Performance_Counter: %u\n", psrperf);
+	mutex_unlock(&dev_priv->psr.lock);
 
 	intel_runtime_pm_put(dev_priv);
 	return 0;
@@ -2072,7 +1996,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
 
 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
 	seq_printf(m, "IRQs disabled: %s\n",
-		   yesno(dev_priv->pm.irqs_disabled));
+		   yesno(!intel_irqs_enabled(dev_priv)));
 
 	return 0;
 }
@@ -2126,6 +2050,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "VGA";
 	case POWER_DOMAIN_AUDIO:
 		return "AUDIO";
+	case POWER_DOMAIN_PLLS:
+		return "PLLS";
 	case POWER_DOMAIN_INIT:
 		return "INIT";
 	default:
@@ -2223,9 +2149,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder;
 
-	seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
-		   crtc->primary->fb->base.id, crtc->x, crtc->y,
-		   crtc->primary->fb->width, crtc->primary->fb->height);
+	if (crtc->primary->fb)
+		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+			   crtc->primary->fb->base.id, crtc->x, crtc->y,
+			   crtc->primary->fb->width, crtc->primary->fb->height);
+	else
+		seq_puts(m, "\tprimary plane disabled\n");
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
 		intel_encoder_info(m, intel_crtc, intel_encoder);
 }
@@ -2287,13 +2216,15 @@ static void intel_connector_info(struct seq_file *m,
 		seq_printf(m, "\tCEA rev: %d\n",
 			   connector->display_info.cea_rev);
 	}
-	if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-	    intel_encoder->type == INTEL_OUTPUT_EDP)
-		intel_dp_info(m, intel_connector);
-	else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
-		intel_hdmi_info(m, intel_connector);
-	else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
-		intel_lvds_info(m, intel_connector);
+	if (intel_encoder) {
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_encoder->type == INTEL_OUTPUT_EDP)
+			intel_dp_info(m, intel_connector);
+		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+			intel_hdmi_info(m, intel_connector);
+		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+			intel_lvds_info(m, intel_connector);
+	}
 
 	seq_printf(m, "\tmodes:\n");
 	list_for_each_entry(mode, &connector->modes, head)
@@ -2347,17 +2278,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
 		bool active;
 		int x, y;
 
-		seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
 			   crtc->base.base.id, pipe_name(crtc->pipe),
-			   yesno(crtc->active));
+			   yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
 		if (crtc->active) {
 			intel_crtc_info(m, crtc);
 
 			active = cursor_position(dev, crtc->pipe, &x, &y);
-			seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
 				   yesno(crtc->cursor_base),
-				   x, y, crtc->cursor_addr,
-				   yesno(active));
+				   x, y, crtc->cursor_width, crtc->cursor_height,
+				   crtc->cursor_addr, yesno(active));
 		}
 
 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -2377,12 +2308,132 @@ static int i915_display_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	int i, j, ret;
+
+	if (!i915_semaphore_is_enabled(dev)) {
+		seq_puts(m, "Semaphores are disabled\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	if (IS_BROADWELL(dev)) {
+		struct page *page;
+		uint64_t *seqno;
+
+		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
+		seqno = (uint64_t *)kmap_atomic(page);
+		for_each_ring(ring, dev_priv, i) {
+			uint64_t offset;
+
+			seq_printf(m, "%s\n", ring->name);
+
+			seq_puts(m, " Last signal:");
+			for (j = 0; j < num_rings; j++) {
+				offset = i * I915_NUM_RINGS + j;
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+			seq_puts(m, " Last wait: ");
+			for (j = 0; j < num_rings; j++) {
+				offset = i + (j * I915_NUM_RINGS);
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+		}
+		kunmap_atomic(seqno);
+	} else {
+		seq_puts(m, " Last signal:");
+		for_each_ring(ring, dev_priv, i)
+			for (j = 0; j < num_rings; j++)
+				seq_printf(m, "0x%08x\n",
+					   I915_READ(ring->semaphore.mbox.signal[j]));
+		seq_putc(m, '\n');
+	}
+
+	seq_puts(m, "\nSync seqno:\n");
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < num_rings; j++) {
+			seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
+		}
+		seq_putc(m, '\n');
+	}
+	seq_putc(m, '\n');
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	drm_modeset_lock_all(dev);
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+		seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
+			   pll->active, yesno(pll->on));
+		seq_printf(m, " tracked hardware state:\n");
+		seq_printf(m, " dpll:    0x%08x\n", pll->hw_state.dpll);
+		seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
+		seq_printf(m, " fp0:     0x%08x\n", pll->hw_state.fp0);
+		seq_printf(m, " fp1:     0x%08x\n", pll->hw_state.fp1);
+		seq_printf(m, " wrpll:   0x%08x\n", pll->hw_state.wrpll);
+	}
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
 	enum pipe pipe;
 };
 
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_encoder *encoder;
+	struct intel_encoder *intel_encoder;
+	struct intel_digital_port *intel_dig_port;
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		intel_encoder = to_intel_encoder(encoder);
+		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+			continue;
+		intel_dig_port = enc_to_dig_port(encoder);
+		if (!intel_dig_port->dp.can_mst)
+			continue;
+
+		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+	}
+	drm_modeset_unlock_all(dev);
+	return 0;
+}
+
 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
 	struct pipe_crc_info *info = inode->i_private;
@@ -2849,7 +2900,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 	return 0;
 }
 
-static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
+	    !crtc->config.pch_pfit.enabled) {
+		crtc->config.pch_pfit.force_thru = true;
+
+		intel_display_power_get(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.pch_pfit.force_thru) {
+		crtc->config.pch_pfit.force_thru = false;
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+
+		intel_display_power_put(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
 				uint32_t *val)
 {
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -2863,6 +2967,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
+		if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev);
+
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -2895,11 +3002,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 	else if (INTEL_INFO(dev)->gen < 5)
 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_VALLEYVIEW(dev))
-		ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
+		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_GEN5(dev) || IS_GEN6(dev))
 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
 	else
-		ret = ivb_pipe_crc_ctl_reg(&source, &val);
+		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 
 	if (ret != 0)
 		return ret;
@@ -2929,11 +3036,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 	/* real source -> none transition */
 	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
 		struct intel_pipe_crc_entry *entries;
+		struct intel_crtc *crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
 		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
 				 pipe_name(pipe));
 
-		intel_wait_for_vblank(dev, pipe);
+		drm_modeset_lock(&crtc->base.mutex, NULL);
+		if (crtc->active)
+			intel_wait_for_vblank(dev, pipe);
+		drm_modeset_unlock(&crtc->base.mutex);
 
 		spin_lock_irq(&pipe_crc->lock);
 		entries = pipe_crc->entries;
@@ -2946,6 +3058,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 			g4x_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_VALLEYVIEW(dev))
 			vlv_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
 	}
 
 	return 0;
@@ -3177,7 +3291,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return -ENODEV;
 
 	return single_open(file, pri_wm_latency_show, dev);
@@ -3187,7 +3301,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return -ENODEV;
 
 	return single_open(file, spr_wm_latency_show, dev);
@@ -3197,7 +3311,7 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return -ENODEV;
 
 	return single_open(file, cur_wm_latency_show, dev);
@@ -3506,7 +3620,7 @@ i915_max_freq_get(void *data, u64 *val)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (INTEL_INFO(dev)->gen < 6)
 		return -ENODEV;
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3532,7 +3646,7 @@ i915_max_freq_set(void *data, u64 val)
 	u32 rp_state_cap, hw_max, hw_min;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (INTEL_INFO(dev)->gen < 6)
 		return -ENODEV;
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3549,8 +3663,8 @@ i915_max_freq_set(void *data, u64 val)
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
 
-		hw_max = valleyview_rps_max_freq(dev_priv);
-		hw_min = valleyview_rps_min_freq(dev_priv);
+		hw_max = dev_priv->rps.max_freq;
+		hw_min = dev_priv->rps.min_freq;
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
@@ -3587,7 +3701,7 @@ i915_min_freq_get(void *data, u64 *val)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (INTEL_INFO(dev)->gen < 6)
 		return -ENODEV;
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3613,7 +3727,7 @@ i915_min_freq_set(void *data, u64 val)
 	u32 rp_state_cap, hw_max, hw_min;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (INTEL_INFO(dev)->gen < 6)
 		return -ENODEV;
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3630,8 +3744,8 @@ i915_min_freq_set(void *data, u64 val)
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
 
-		hw_max = valleyview_rps_max_freq(dev_priv);
-		hw_min = valleyview_rps_min_freq(dev_priv);
+		hw_max = dev_priv->rps.max_freq;
+		hw_min = dev_priv->rps.min_freq;
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
@@ -3799,14 +3913,10 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
-	{"i915_rstdby_delays", i915_rstdby_delays, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
-	{"i915_delayfreq_table", i915_delayfreq_table, 0},
-	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
-	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_ips_status", i915_ips_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
@@ -3823,6 +3933,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_pc8_status", i915_pc8_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
 	{"i915_display_info", i915_display_info, 0},
+	{"i915_semaphore_status", i915_semaphore_status, 0},
+	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+	{"i915_dp_mst_info", i915_dp_mst_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
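
All of the new dumpers in this file (i915_semaphore_status, i915_shared_dplls_info, i915_dp_mst_info) follow the same recipe: a seq_file show function listed in i915_debugfs_list, taking whichever lock guards the state it prints, with mutex_lock_interruptible() so a killed reader does not hang on a contended lock. A condensed sketch of that recipe, with hypothetical example_* names (struct drm_info_list, struct drm_info_node and drm_debugfs_create_files() are the real drm helpers):

#include <drm/drmP.h>

static int example_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	int ret;

	/* interruptible, so a signal can break the reader out of a
	 * contended lock instead of wedging the debugfs file */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_puts(m, "state dumped under struct_mutex\n");

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static const struct drm_info_list example_debugfs_list[] = {
	{"example_info", example_info, 0},
};

/* wired up from the driver's debugfs init, e.g.:
 *	drm_debugfs_create_files(example_debugfs_list,
 *				 ARRAY_SIZE(example_debugfs_list),
 *				 minor->debugfs_root, minor);
 */
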
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d44344140627..2e7f03ad5ee2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev)
 	I915_WRITE(HWS_PGA, 0x1ffff000);
 }
 
-void i915_kernel_lost_context(struct drm_device * dev)
+void i915_kernel_lost_context(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
@@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
-static int i915_dma_cleanup(struct drm_device * dev)
+static int i915_dma_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
@@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	return 0;
 }
 
-static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	return 0;
 }
 
-static int i915_dma_resume(struct drm_device * dev)
+static int i915_dma_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = LP_RING(dev_priv);
@@ -359,7 +359,7 @@ static int validate_cmd(int cmd)
 	return 0;
 }
 
-static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i, ret;
@@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 
 	for (i = 0; i < dwords;) {
 		int sz = validate_cmd(buffer[i]);
+
 		if (sz == 0 || i + sz > dwords)
 			return -EINVAL;
 		i += sz;
@@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 	}
 }
 
-static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+static int i915_dispatch_cmdbuffer(struct drm_device *dev,
 				   drm_i915_cmdbuffer_t *cmd,
 				   struct drm_clip_rect *cliprects,
 				   void *cmdbuf)
@@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 	return 0;
 }
 
-static int i915_dispatch_batchbuffer(struct drm_device * dev,
-				     drm_i915_batchbuffer_t * batch,
+static int i915_dispatch_batchbuffer(struct drm_device *dev,
+				     drm_i915_batchbuffer_t *batch,
 				     struct drm_clip_rect *cliprects)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 	return 0;
 }
 
-static int i915_dispatch_flip(struct drm_device * dev)
+static int i915_dispatch_flip(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
@@ -755,7 +756,7 @@ fail_batch_free:
 	return ret;
 }
 
-static int i915_emit_irq(struct drm_device * dev)
+static int i915_emit_irq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev)
 	return dev_priv->dri1.counter;
 }
 
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device *dev, int irq_nr)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
 	if (state == VGA_SWITCHEROO_ON) {
 		pr_info("switched on\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1338,6 +1340,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_gem_stolen;
 
+	dev_priv->pm._irqs_disabled = false;
+
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
@@ -1375,9 +1379,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 */
 	intel_fbdev_initial_config(dev);
 
-	/* Only enable hotplug handling once the fbdev is fully set up. */
-	dev_priv->enable_hotplug_processing = true;
-
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
@@ -1425,15 +1426,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 }
 
 #if IS_ENABLED(CONFIG_FB)
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
 	struct pci_dev *pdev = dev_priv->dev->pdev;
 	bool primary;
+	int ret;
 
 	ap = alloc_apertures(1);
 	if (!ap)
-		return;
+		return -ENOMEM;
 
 	ap->ranges[0].base = dev_priv->gtt.mappable_base;
 	ap->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -1441,13 +1443,16 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 
-	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
 
 	kfree(ap);
+
+	return ret;
 }
 #else
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
+	return 0;
 }
 #endif
 
@@ -1492,10 +1497,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #define SEP_EMPTY
 #define PRINT_FLAG(name) info->name ? #name "," : ""
 #define SEP_COMMA ,
-	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
 			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
 			 info->gen,
 			 dev_priv->dev->pdev->device,
+			 dev_priv->dev->pdev->revision,
 			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
 #undef PRINT_S
 #undef SEP_EMPTY
@@ -1594,7 +1600,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
-	dev->dev_private = (void *)dev_priv;
+	dev->dev_private = dev_priv;
 	dev_priv->dev = dev;
 
 	/* copy initial configuration to dev_priv->info */
@@ -1606,6 +1612,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->backlight_lock);
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
+	spin_lock_init(&dev_priv->mmio_flip_lock);
 	mutex_init(&dev_priv->dpio_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
@@ -1664,7 +1671,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			goto out_gtt;
 		}
 
-		i915_kick_out_firmware_fb(dev_priv);
+		ret = i915_kick_out_firmware_fb(dev_priv);
+		if (ret) {
+			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+			goto out_gtt;
+		}
 	}
 
 	pci_set_master(dev->pdev);
@@ -1717,6 +1728,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_mtrrfree;
 	}
 
+	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (dev_priv->dp_wq == NULL) {
+		DRM_ERROR("Failed to create our dp workqueue.\n");
+		ret = -ENOMEM;
+		goto out_freewq;
+	}
+
 	intel_irq_init(dev);
 	intel_uncore_sanitize(dev);
 
@@ -1792,6 +1810,8 @@ out_gem_unload:
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 	pm_qos_remove_request(&dev_priv->pm_qos);
+	destroy_workqueue(dev_priv->dp_wq);
+out_freewq:
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1892,6 +1912,7 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
+	destroy_workqueue(dev_priv->dp_wq);
 	destroy_workqueue(dev_priv->wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
 
@@ -1933,7 +1954,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  * and DMA structures, since the kernel won't be using them, and clea
  * up any GEM state.
  */
-void i915_driver_lastclose(struct drm_device * dev)
+void i915_driver_lastclose(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1954,11 +1975,11 @@ void i915_driver_lastclose(struct drm_device * dev)
 	i915_dma_cleanup(dev);
 }
 
-void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
 {
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_context_close(dev, file_priv);
-	i915_gem_release(dev, file_priv);
+	i915_gem_context_close(dev, file);
+	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -2031,7 +2052,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
  * manage the gtt, we need to claim that all intel devices are agp. For
  * otherwise the drm core refuses to initialize the agp support code.
  */
-int i915_driver_device_is_agp(struct drm_device * dev)
+int i915_driver_device_is_agp(struct drm_device *dev)
 {
 	return 1;
 }
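
Besides the pointer-style cleanups, the i915_dma.c hunks thread a second workqueue into i915_driver_load()'s error ladder: the new out_freewq label sits between the two destroy_workqueue() calls so a failure at any point unwinds exactly what has been set up, in reverse order. A reduced sketch of that idiom (foo_* names hypothetical; alloc_ordered_workqueue() and destroy_workqueue() are the real workqueue API):

#include <linux/workqueue.h>

struct foo_private {
	struct workqueue_struct *wq;
	struct workqueue_struct *dp_wq;
};

static int foo_load(struct foo_private *priv)
{
	int ret;

	priv->wq = alloc_ordered_workqueue("foo", 0);
	if (priv->wq == NULL)
		return -ENOMEM;

	priv->dp_wq = alloc_ordered_workqueue("foo-dp", 0);
	if (priv->dp_wq == NULL) {
		ret = -ENOMEM;
		goto out_freewq;
	}

	/* later setup failures would jump to a label *above* out_freewq,
	 * destroying dp_wq first, then falling through to free wq, so
	 * teardown always mirrors setup in reverse order */
	return 0;

out_freewq:
	destroy_workqueue(priv->wq);
	return ret;
}
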
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 651e65e051c0..6c4b25ce8bb0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -28,6 +28,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/acpi.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -46,8 +47,6 @@ static struct drm_driver driver;
 			   PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
 	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
 			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
-	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
 	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
 
 #define GEN_CHV_PIPEOFFSETS \
@@ -55,10 +54,6 @@ static struct drm_driver driver;
 			   CHV_PIPE_C_OFFSET }, \
 	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
 			   CHV_TRANSCODER_C_OFFSET, }, \
-	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
-			  CHV_DPLL_C_OFFSET }, \
-	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
-			     CHV_DPLL_C_MD_OFFSET }, \
 	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
 			     CHV_PALETTE_C_OFFSET }
 
@@ -308,6 +303,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -319,6 +315,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -330,6 +327,7 @@ static const struct intel_device_info intel_broadwell_gt3d_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -341,6 +339,7 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -482,10 +481,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (i915.semaphores >= 0)
 		return i915.semaphores;
 
-	/* Until we get further testing... */
-	if (IS_GEN8(dev))
-		return false;
-
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -499,8 +494,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
-
-	intel_runtime_pm_get(dev_priv);
+	pci_power_t opregion_target_state;
 
 	/* ignore lid events during suspend */
 	mutex_lock(&dev_priv->modeset_restore_lock);
@@ -526,21 +520,23 @@ static int i915_drm_freeze(struct drm_device *dev)
 			return error;
 		}
 
-		drm_irq_uninstall(dev);
-		dev_priv->enable_hotplug_processing = false;
-
-		intel_disable_gt_powersave(dev);
-
 		/*
 		 * Disable CRTCs directly since we want to preserve sw state
-		 * for _thaw.
+		 * for _thaw. Also, power gate the CRTC power wells.
 		 */
 		drm_modeset_lock_all(dev);
-		for_each_crtc(dev, crtc) {
-			dev_priv->display.crtc_disable(crtc);
-		}
+		for_each_crtc(dev, crtc)
+			intel_crtc_control(crtc, false);
 		drm_modeset_unlock_all(dev);
 
+		intel_dp_mst_suspend(dev);
+
+		flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+		intel_runtime_pm_disable_interrupts(dev);
+
+		intel_suspend_gt_powersave(dev);
+
 		intel_modeset_suspend_hw(dev);
 	}
 
@@ -548,8 +544,15 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	i915_save_state(dev);
 
+	opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+	if (acpi_target_system_state() < ACPI_STATE_S3)
+		opregion_target_state = PCI_D1;
+#endif
+	intel_opregion_notify_adapter(dev, opregion_target_state);
+
+	intel_uncore_forcewake_reset(dev, false);
 	intel_opregion_fini(dev);
-	intel_uncore_fini(dev);
 
 	console_lock();
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
@@ -557,6 +560,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	dev_priv->suspend_count++;
 
+	intel_display_set_init_power(dev_priv, false);
+
 	return 0;
 }
 
@@ -606,7 +611,10 @@ static int i915_drm_thaw_early(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	intel_uncore_early_sanitize(dev);
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_disable_pc8(dev_priv);
+
+	intel_uncore_early_sanitize(dev, true);
 	intel_uncore_sanitize(dev);
 	intel_power_domains_init_hw(dev_priv);
 
@@ -639,11 +647,19 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		}
 		mutex_unlock(&dev->struct_mutex);
 
-		/* We need working interrupts for modeset enabling ... */
-		drm_irq_install(dev, dev->pdev->irq);
+		intel_runtime_pm_restore_interrupts(dev);
 
 		intel_modeset_init_hw(dev);
 
+		{
+			unsigned long irqflags;
+			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+			if (dev_priv->display.hpd_irq_setup)
+				dev_priv->display.hpd_irq_setup(dev);
+			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		}
+
+		intel_dp_mst_resume(dev);
 		drm_modeset_lock_all(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
@@ -655,7 +671,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		 * notifications.
 		 * */
 		intel_hpd_init(dev);
-		dev_priv->enable_hotplug_processing = true;
 		/* Config may have changed between suspend and resume */
 		drm_helper_hpd_irq_event(dev);
 	}
@@ -678,7 +693,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 	dev_priv->modeset_restore = MODESET_DONE;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_opregion_notify_adapter(dev, PCI_D0);
+
 	return 0;
 }
 
@@ -887,6 +903,7 @@ static int i915_pm_suspend_late(struct device *dev)
887{ 903{
888 struct pci_dev *pdev = to_pci_dev(dev); 904 struct pci_dev *pdev = to_pci_dev(dev);
889 struct drm_device *drm_dev = pci_get_drvdata(pdev); 905 struct drm_device *drm_dev = pci_get_drvdata(pdev);
906 struct drm_i915_private *dev_priv = drm_dev->dev_private;
890 907
891 /* 908 /*
 892 * We have a suspend ordering issue with the snd-hda driver also 909 * We have a suspend ordering issue with the snd-hda driver also
@@ -900,6 +917,9 @@ static int i915_pm_suspend_late(struct device *dev)
900 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 917 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
901 return 0; 918 return 0;
902 919
920 if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
921 hsw_enable_pc8(dev_priv);
922
903 pci_disable_device(pdev); 923 pci_disable_device(pdev);
904 pci_set_power_state(pdev, PCI_D3hot); 924 pci_set_power_state(pdev, PCI_D3hot);
905 925
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1f7700897dfc..a26eec285da7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,7 @@
53 53
54#define DRIVER_NAME "i915" 54#define DRIVER_NAME "i915"
55#define DRIVER_DESC "Intel Graphics" 55#define DRIVER_DESC "Intel Graphics"
56#define DRIVER_DATE "20080730" 56#define DRIVER_DATE "20140620"
57 57
58enum pipe { 58enum pipe {
59 INVALID_PIPE = -1, 59 INVALID_PIPE = -1,
@@ -129,6 +129,7 @@ enum intel_display_power_domain {
129 POWER_DOMAIN_PORT_OTHER, 129 POWER_DOMAIN_PORT_OTHER,
130 POWER_DOMAIN_VGA, 130 POWER_DOMAIN_VGA,
131 POWER_DOMAIN_AUDIO, 131 POWER_DOMAIN_AUDIO,
132 POWER_DOMAIN_PLLS,
132 POWER_DOMAIN_INIT, 133 POWER_DOMAIN_INIT,
133 134
134 POWER_DOMAIN_NUM, 135 POWER_DOMAIN_NUM,
@@ -178,14 +179,20 @@ enum hpd_pin {
178 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ 179 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
179 if ((intel_connector)->base.encoder == (__encoder)) 180 if ((intel_connector)->base.encoder == (__encoder))
180 181
182#define for_each_power_domain(domain, mask) \
183 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
184 if ((1 << (domain)) & (mask))
185
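As a quick aside, for_each_power_domain follows the usual kernel for-plus-if iterator idiom: the for walks every domain index and the trailing if filters on the mask, so the loop body only runs for domains present in the mask. A standalone rendition with made-up domain names (illustrative, not the driver's enum):

#include <stdio.h>

enum { DOMAIN_PIPE_A, DOMAIN_PIPE_B, DOMAIN_VGA, DOMAIN_AUDIO, DOMAIN_NUM };

#define for_each_domain(domain, mask) \
	for ((domain) = 0; (domain) < DOMAIN_NUM; (domain)++) \
		if ((1 << (domain)) & (mask))

int main(void)
{
	int domain;
	unsigned int mask = (1 << DOMAIN_PIPE_A) | (1 << DOMAIN_AUDIO);

	/* Only the domains set in the mask are visited. */
	for_each_domain(domain, mask)
		printf("domain %d is in the mask\n", domain);
	return 0;
}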
181struct drm_i915_private; 186struct drm_i915_private;
182struct i915_mmu_object; 187struct i915_mmu_object;
183 188
184enum intel_dpll_id { 189enum intel_dpll_id {
185 DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ 190 DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
186 /* real shared dpll ids must be >= 0 */ 191 /* real shared dpll ids must be >= 0 */
187 DPLL_ID_PCH_PLL_A, 192 DPLL_ID_PCH_PLL_A = 0,
188 DPLL_ID_PCH_PLL_B, 193 DPLL_ID_PCH_PLL_B = 1,
194 DPLL_ID_WRPLL1 = 0,
195 DPLL_ID_WRPLL2 = 1,
189}; 196};
190#define I915_NUM_PLLS 2 197#define I915_NUM_PLLS 2
191 198
@@ -194,6 +201,7 @@ struct intel_dpll_hw_state {
194 uint32_t dpll_md; 201 uint32_t dpll_md;
195 uint32_t fp0; 202 uint32_t fp0;
196 uint32_t fp1; 203 uint32_t fp1;
204 uint32_t wrpll;
197}; 205};
198 206
199struct intel_shared_dpll { 207struct intel_shared_dpll {
@@ -204,6 +212,8 @@ struct intel_shared_dpll {
204 /* should match the index in the dev_priv->shared_dplls array */ 212 /* should match the index in the dev_priv->shared_dplls array */
205 enum intel_dpll_id id; 213 enum intel_dpll_id id;
206 struct intel_dpll_hw_state hw_state; 214 struct intel_dpll_hw_state hw_state;
215 /* The mode_set hook is optional and should be used together with the
216 * intel_prepare_shared_dpll function. */
207 void (*mode_set)(struct drm_i915_private *dev_priv, 217 void (*mode_set)(struct drm_i915_private *dev_priv,
208 struct intel_shared_dpll *pll); 218 struct intel_shared_dpll *pll);
209 void (*enable)(struct drm_i915_private *dev_priv, 219 void (*enable)(struct drm_i915_private *dev_priv,
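The new comment notes that the mode_set hook is optional and only meaningful together with intel_prepare_shared_dpll. A minimal sketch of that optional-hook pattern, with hypothetical names standing in for the driver's types:

#include <stdio.h>

struct fake_dpll {
	const char *name;
	/* Optional: may be NULL, like the mode_set hook above. */
	void (*mode_set)(struct fake_dpll *pll);
};

static void prepare_pll(struct fake_dpll *pll)
{
	/* Call the hook only when the PLL provides one. */
	if (pll->mode_set)
		pll->mode_set(pll);
	else
		printf("%s: no mode_set hook, nothing to do\n", pll->name);
}

static void wrpll_mode_set(struct fake_dpll *pll)
{
	printf("%s: programming dividers\n", pll->name);
}

int main(void)
{
	struct fake_dpll wrpll = { "WRPLL1", wrpll_mode_set };
	struct fake_dpll spll = { "SPLL", NULL };

	prepare_pll(&wrpll);
	prepare_pll(&spll);
	return 0;
}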
@@ -228,12 +238,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
228 int pixel_clock, int link_clock, 238 int pixel_clock, int link_clock,
229 struct intel_link_m_n *m_n); 239 struct intel_link_m_n *m_n);
230 240
231struct intel_ddi_plls {
232 int spll_refcount;
233 int wrpll1_refcount;
234 int wrpll2_refcount;
235};
236
237/* Interface history: 241/* Interface history:
238 * 242 *
239 * 1.1: Original. 243 * 1.1: Original.
@@ -324,6 +328,7 @@ struct drm_i915_error_state {
324 u64 fence[I915_MAX_NUM_FENCES]; 328 u64 fence[I915_MAX_NUM_FENCES];
325 struct intel_overlay_error_state *overlay; 329 struct intel_overlay_error_state *overlay;
326 struct intel_display_error_state *display; 330 struct intel_display_error_state *display;
331 struct drm_i915_error_object *semaphore_obj;
327 332
328 struct drm_i915_error_ring { 333 struct drm_i915_error_ring {
329 bool valid; 334 bool valid;
@@ -435,8 +440,8 @@ struct drm_i915_display_funcs {
435 void (*update_wm)(struct drm_crtc *crtc); 440 void (*update_wm)(struct drm_crtc *crtc);
436 void (*update_sprite_wm)(struct drm_plane *plane, 441 void (*update_sprite_wm)(struct drm_plane *plane,
437 struct drm_crtc *crtc, 442 struct drm_crtc *crtc,
438 uint32_t sprite_width, int pixel_size, 443 uint32_t sprite_width, uint32_t sprite_height,
439 bool enable, bool scaled); 444 int pixel_size, bool enable, bool scaled);
440 void (*modeset_global_resources)(struct drm_device *dev); 445 void (*modeset_global_resources)(struct drm_device *dev);
441 /* Returns the active state of the crtc, and if the crtc is active, 446 /* Returns the active state of the crtc, and if the crtc is active,
442 * fills out the pipe-config with the hw state. */ 447 * fills out the pipe-config with the hw state. */
@@ -552,8 +557,6 @@ struct intel_device_info {
552 /* Register offsets for the various display pipes and transcoders */ 557 /* Register offsets for the various display pipes and transcoders */
553 int pipe_offsets[I915_MAX_TRANSCODERS]; 558 int pipe_offsets[I915_MAX_TRANSCODERS];
554 int trans_offsets[I915_MAX_TRANSCODERS]; 559 int trans_offsets[I915_MAX_TRANSCODERS];
555 int dpll_offsets[I915_MAX_PIPES];
556 int dpll_md_offsets[I915_MAX_PIPES];
557 int palette_offsets[I915_MAX_PIPES]; 560 int palette_offsets[I915_MAX_PIPES];
558 int cursor_offsets[I915_MAX_PIPES]; 561 int cursor_offsets[I915_MAX_PIPES];
559}; 562};
@@ -586,28 +589,48 @@ struct i915_ctx_hang_stats {
586}; 589};
587 590
588/* This must match up with the value previously used for execbuf2.rsvd1. */ 591/* This must match up with the value previously used for execbuf2.rsvd1. */
589#define DEFAULT_CONTEXT_ID 0 592#define DEFAULT_CONTEXT_HANDLE 0
593/**
594 * struct intel_context - as the name implies, represents a context.
595 * @ref: reference count.
596 * @user_handle: userspace tracking identity for this context.
597 * @remap_slice: l3 row remapping information.
598 * @file_priv: filp associated with this context (NULL for global default
599 * context).
600 * @hang_stats: information about the role of this context in possible GPU
601 * hangs.
602 * @vm: virtual memory space used by this context.
603 * @legacy_hw_ctx: render context backing object and whether it is correctly
604 * initialized (legacy ring submission mechanism only).
605 * @link: link in the global list of contexts.
606 *
607 * Contexts are memory images used by the hardware to store copies of their
608 * internal state.
609 */
590struct intel_context { 610struct intel_context {
591 struct kref ref; 611 struct kref ref;
592 int id; 612 int user_handle;
593 bool is_initialized;
594 uint8_t remap_slice; 613 uint8_t remap_slice;
595 struct drm_i915_file_private *file_priv; 614 struct drm_i915_file_private *file_priv;
596 struct intel_engine_cs *last_ring;
597 struct drm_i915_gem_object *obj;
598 struct i915_ctx_hang_stats hang_stats; 615 struct i915_ctx_hang_stats hang_stats;
599 struct i915_address_space *vm; 616 struct i915_address_space *vm;
600 617
618 struct {
619 struct drm_i915_gem_object *rcs_state;
620 bool initialized;
621 } legacy_hw_ctx;
622
601 struct list_head link; 623 struct list_head link;
602}; 624};
603 625
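The kerneldoc above describes ref as a plain reference count; the actual struct uses kref with a release callback (i915_gem_context_free). A userspace analog of that get/put lifetime, with malloc/free standing in for the kernel machinery (illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int ref;
	int user_handle;
};

static struct ctx *ctx_create(int handle)
{
	struct ctx *c = malloc(sizeof(*c));
	if (!c)
		return NULL;
	c->ref = 1;               /* creator holds the first reference */
	c->user_handle = handle;
	return c;
}

static void ctx_reference(struct ctx *c)
{
	c->ref++;
}

static void ctx_unreference(struct ctx *c)
{
	/* The release step (here: free) runs when the last ref drops. */
	if (--c->ref == 0) {
		printf("freeing context %d\n", c->user_handle);
		free(c);
	}
}

int main(void)
{
	struct ctx *c = ctx_create(0);

	ctx_reference(c);   /* e.g. ring->last_context takes a ref */
	ctx_unreference(c);
	ctx_unreference(c); /* last put frees */
	return 0;
}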
604struct i915_fbc { 626struct i915_fbc {
605 unsigned long size; 627 unsigned long size;
628 unsigned threshold;
606 unsigned int fb_id; 629 unsigned int fb_id;
607 enum plane plane; 630 enum plane plane;
608 int y; 631 int y;
609 632
610 struct drm_mm_node *compressed_fb; 633 struct drm_mm_node compressed_fb;
611 struct drm_mm_node *compressed_llb; 634 struct drm_mm_node *compressed_llb;
612 635
613 struct intel_fbc_work { 636 struct intel_fbc_work {
@@ -635,9 +658,15 @@ struct i915_drrs {
635 struct intel_connector *connector; 658 struct intel_connector *connector;
636}; 659};
637 660
661struct intel_dp;
638struct i915_psr { 662struct i915_psr {
663 struct mutex lock;
639 bool sink_support; 664 bool sink_support;
640 bool source_ok; 665 bool source_ok;
666 struct intel_dp *enabled;
667 bool active;
668 struct delayed_work work;
669 unsigned busy_frontbuffer_bits;
641}; 670};
642 671
643enum intel_pch { 672enum intel_pch {
@@ -880,6 +909,12 @@ struct vlv_s0ix_state {
880 u32 clock_gate_dis2; 909 u32 clock_gate_dis2;
881}; 910};
882 911
912struct intel_rps_ei {
913 u32 cz_clock;
914 u32 render_c0;
915 u32 media_c0;
916};
917
883struct intel_gen6_power_mgmt { 918struct intel_gen6_power_mgmt {
884 /* work and pm_iir are protected by dev_priv->irq_lock */ 919 /* work and pm_iir are protected by dev_priv->irq_lock */
885 struct work_struct work; 920 struct work_struct work;
@@ -903,6 +938,9 @@ struct intel_gen6_power_mgmt {
903 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ 938 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
 904 u8 rp1_freq; /* "less than" RP0 power/frequency */ 939 u8 rp1_freq; /* "less than" RP0 power/frequency */
905 u8 rp0_freq; /* Non-overclocked max frequency. */ 940 u8 rp0_freq; /* Non-overclocked max frequency. */
941 u32 cz_freq;
942
943 u32 ei_interrupt_count;
906 944
907 int last_adj; 945 int last_adj;
908 enum { LOW_POWER, BETWEEN, HIGH_POWER } power; 946 enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -910,6 +948,9 @@ struct intel_gen6_power_mgmt {
910 bool enabled; 948 bool enabled;
911 struct delayed_work delayed_resume_work; 949 struct delayed_work delayed_resume_work;
912 950
951 /* manual wa residency calculations */
952 struct intel_rps_ei up_ei, down_ei;
953
913 /* 954 /*
914 * Protects RPS/RC6 register access and PCU communication. 955 * Protects RPS/RC6 register access and PCU communication.
915 * Must be taken after struct_mutex if nested. 956 * Must be taken after struct_mutex if nested.
@@ -1230,6 +1271,7 @@ struct intel_vbt_data {
1230 u16 pwm_freq_hz; 1271 u16 pwm_freq_hz;
1231 bool present; 1272 bool present;
1232 bool active_low_pwm; 1273 bool active_low_pwm;
1274 u8 min_brightness; /* min_brightness/255 of max */
1233 } backlight; 1275 } backlight;
1234 1276
1235 /* MIPI DSI */ 1277 /* MIPI DSI */
@@ -1299,7 +1341,7 @@ struct ilk_wm_values {
1299 */ 1341 */
1300struct i915_runtime_pm { 1342struct i915_runtime_pm {
1301 bool suspended; 1343 bool suspended;
1302 bool irqs_disabled; 1344 bool _irqs_disabled;
1303}; 1345};
1304 1346
1305enum intel_pipe_crc_source { 1347enum intel_pipe_crc_source {
@@ -1332,6 +1374,17 @@ struct intel_pipe_crc {
1332 wait_queue_head_t wq; 1374 wait_queue_head_t wq;
1333}; 1375};
1334 1376
1377struct i915_frontbuffer_tracking {
1378 struct mutex lock;
1379
1380 /*
1381 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
1382 * scheduled flips.
1383 */
1384 unsigned busy_bits;
1385 unsigned flip_bits;
1386};
1387
1335struct drm_i915_private { 1388struct drm_i915_private {
1336 struct drm_device *dev; 1389 struct drm_device *dev;
1337 struct kmem_cache *slab; 1390 struct kmem_cache *slab;
@@ -1363,6 +1416,7 @@ struct drm_i915_private {
1363 1416
1364 struct pci_dev *bridge_dev; 1417 struct pci_dev *bridge_dev;
1365 struct intel_engine_cs ring[I915_NUM_RINGS]; 1418 struct intel_engine_cs ring[I915_NUM_RINGS];
1419 struct drm_i915_gem_object *semaphore_obj;
1366 uint32_t last_seqno, next_seqno; 1420 uint32_t last_seqno, next_seqno;
1367 1421
1368 drm_dma_handle_t *status_page_dmah; 1422 drm_dma_handle_t *status_page_dmah;
@@ -1371,6 +1425,9 @@ struct drm_i915_private {
1371 /* protects the irq masks */ 1425 /* protects the irq masks */
1372 spinlock_t irq_lock; 1426 spinlock_t irq_lock;
1373 1427
1428 /* protects the mmio flip data */
1429 spinlock_t mmio_flip_lock;
1430
1374 bool display_irqs_enabled; 1431 bool display_irqs_enabled;
1375 1432
1376 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 1433 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1390,7 +1447,6 @@ struct drm_i915_private {
1390 u32 pipestat_irq_mask[I915_MAX_PIPES]; 1447 u32 pipestat_irq_mask[I915_MAX_PIPES];
1391 1448
1392 struct work_struct hotplug_work; 1449 struct work_struct hotplug_work;
1393 bool enable_hotplug_processing;
1394 struct { 1450 struct {
1395 unsigned long hpd_last_jiffies; 1451 unsigned long hpd_last_jiffies;
1396 int hpd_cnt; 1452 int hpd_cnt;
@@ -1467,7 +1523,6 @@ struct drm_i915_private {
1467 1523
1468 int num_shared_dpll; 1524 int num_shared_dpll;
1469 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1525 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1470 struct intel_ddi_plls ddi_plls;
1471 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1526 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1472 1527
1473 /* Reclocking support */ 1528 /* Reclocking support */
@@ -1475,6 +1530,9 @@ struct drm_i915_private {
1475 bool lvds_downclock_avail; 1530 bool lvds_downclock_avail;
1476 /* indicates the reduced downclock for LVDS*/ 1531 /* indicates the reduced downclock for LVDS*/
1477 int lvds_downclock; 1532 int lvds_downclock;
1533
1534 struct i915_frontbuffer_tracking fb_tracking;
1535
1478 u16 orig_clock; 1536 u16 orig_clock;
1479 1537
1480 bool mchbar_need_disable; 1538 bool mchbar_need_disable;
@@ -1541,6 +1599,20 @@ struct drm_i915_private {
1541 1599
1542 struct i915_runtime_pm pm; 1600 struct i915_runtime_pm pm;
1543 1601
1602 struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
1603 u32 long_hpd_port_mask;
1604 u32 short_hpd_port_mask;
1605 struct work_struct dig_port_work;
1606
1607 /*
1608 * If we get an HPD irq from DP and an HPD irq from non-DP,
1609 * the non-DP HPD work could block the workqueue while trying
1610 * to grab a mode config mutex that userspace already holds.
1611 * Userspace, in turn, waits for the DP work to run, which is
1612 * queued behind the non-DP one.
1613 */
1614 struct workqueue_struct *dp_wq;
1615
1544 /* Old dri1 support infrastructure, beware the dragons ya fools entering 1616 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1545 * here! */ 1617 * here! */
1546 struct i915_dri1_state dri1; 1618 struct i915_dri1_state dri1;
@@ -1592,6 +1664,28 @@ struct drm_i915_gem_object_ops {
1592 void (*release)(struct drm_i915_gem_object *); 1664 void (*release)(struct drm_i915_gem_object *);
1593}; 1665};
1594 1666
1667/*
1668 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
1669 * considered to be the frontbuffer for the given plane interface-wise. This
1670 * doesn't mean that the hw necessarily already scans it out, but that any
1671 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
1672 *
1673 * We have one bit per pipe and per scanout plane type.
1674 */
1675#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
1676#define INTEL_FRONTBUFFER_BITS \
1677 (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
1678#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
1679 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
1680#define INTEL_FRONTBUFFER_CURSOR(pipe) \
1681 (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
1682#define INTEL_FRONTBUFFER_SPRITE(pipe) \
1683 (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
1684#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
1685 (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
1686#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
1687 (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
1688
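To make the packing concrete: each pipe owns a nibble of the bitfield, with primary, cursor, sprite and overlay at bits 0-3 of that nibble. The macros compile standalone (names shortened here, values identical to the definitions above):

#include <stdio.h>

#define BITS_PER_PIPE 4
#define FB_PRIMARY(pipe)  (1 << (BITS_PER_PIPE * (pipe)))
#define FB_CURSOR(pipe)   (1 << (1 + BITS_PER_PIPE * (pipe)))
#define FB_SPRITE(pipe)   (1 << (2 + BITS_PER_PIPE * (pipe)))
#define FB_OVERLAY(pipe)  (1 << (3 + BITS_PER_PIPE * (pipe)))
#define FB_ALL_MASK(pipe) (0xf << (BITS_PER_PIPE * (pipe)))

int main(void)
{
	int pipe;

	for (pipe = 0; pipe < 3; pipe++)
		printf("pipe %c: primary=0x%03x cursor=0x%03x sprite=0x%03x "
		       "overlay=0x%03x all=0x%03x\n", 'A' + pipe,
		       FB_PRIMARY(pipe), FB_CURSOR(pipe), FB_SPRITE(pipe),
		       FB_OVERLAY(pipe), FB_ALL_MASK(pipe));
	return 0;
}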
1595struct drm_i915_gem_object { 1689struct drm_i915_gem_object {
1596 struct drm_gem_object base; 1690 struct drm_gem_object base;
1597 1691
@@ -1662,6 +1756,12 @@ struct drm_i915_gem_object {
1662 unsigned int pin_display:1; 1756 unsigned int pin_display:1;
1663 1757
1664 /* 1758 /*
1759 * Is the object to be mapped as read-only to the GPU
1760 * Only honoured if hardware has relevant pte bit
1761 */
1762 unsigned long gt_ro:1;
1763
1764 /*
1665 * Is the GPU currently using a fence to access this buffer, 1765 * Is the GPU currently using a fence to access this buffer,
1666 */ 1766 */
1667 unsigned int pending_fenced_gpu_access:1; 1767 unsigned int pending_fenced_gpu_access:1;
@@ -1673,6 +1773,8 @@ struct drm_i915_gem_object {
1673 unsigned int has_global_gtt_mapping:1; 1773 unsigned int has_global_gtt_mapping:1;
1674 unsigned int has_dma_mapping:1; 1774 unsigned int has_dma_mapping:1;
1675 1775
1776 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
1777
1676 struct sg_table *pages; 1778 struct sg_table *pages;
1677 int pages_pin_count; 1779 int pages_pin_count;
1678 1780
@@ -1719,6 +1821,10 @@ struct drm_i915_gem_object {
1719}; 1821};
1720#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1822#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1721 1823
1824void i915_gem_track_fb(struct drm_i915_gem_object *old,
1825 struct drm_i915_gem_object *new,
1826 unsigned frontbuffer_bits);
1827
1722/** 1828/**
1723 * Request queue structure. 1829 * Request queue structure.
1724 * 1830 *
@@ -1940,10 +2046,8 @@ struct drm_i915_cmd_table {
1940#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2046#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1941 2047
1942#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2048#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
1943#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \ 2049#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
1944 (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))) 2050#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
1945#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
1946 && !IS_GEN8(dev))
1947#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) 2051#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
1948#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) 2052#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
1949 2053
@@ -1998,6 +2102,8 @@ struct drm_i915_cmd_table {
1998#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 2102#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
1999#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 2103#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
2000 2104
2105#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
2106
2001/* DPF == dynamic parity feature */ 2107/* DPF == dynamic parity feature */
2002#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 2108#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
2003#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) 2109#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
@@ -2040,6 +2146,8 @@ struct i915_params {
2040 bool reset; 2146 bool reset;
2041 bool disable_display; 2147 bool disable_display;
2042 bool disable_vtd_wa; 2148 bool disable_vtd_wa;
2149 int use_mmio_flip;
2150 bool mmio_debug;
2043}; 2151};
2044extern struct i915_params i915 __read_mostly; 2152extern struct i915_params i915 __read_mostly;
2045 2153
@@ -2048,12 +2156,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev);
2048extern void i915_kernel_lost_context(struct drm_device * dev); 2156extern void i915_kernel_lost_context(struct drm_device * dev);
2049extern int i915_driver_load(struct drm_device *, unsigned long flags); 2157extern int i915_driver_load(struct drm_device *, unsigned long flags);
2050extern int i915_driver_unload(struct drm_device *); 2158extern int i915_driver_unload(struct drm_device *);
2051extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); 2159extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
2052extern void i915_driver_lastclose(struct drm_device * dev); 2160extern void i915_driver_lastclose(struct drm_device * dev);
2053extern void i915_driver_preclose(struct drm_device *dev, 2161extern void i915_driver_preclose(struct drm_device *dev,
2054 struct drm_file *file_priv); 2162 struct drm_file *file);
2055extern void i915_driver_postclose(struct drm_device *dev, 2163extern void i915_driver_postclose(struct drm_device *dev,
2056 struct drm_file *file_priv); 2164 struct drm_file *file);
2057extern int i915_driver_device_is_agp(struct drm_device * dev); 2165extern int i915_driver_device_is_agp(struct drm_device * dev);
2058#ifdef CONFIG_COMPAT 2166#ifdef CONFIG_COMPAT
2059extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2167extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2084,10 +2192,12 @@ extern void intel_irq_init(struct drm_device *dev);
2084extern void intel_hpd_init(struct drm_device *dev); 2192extern void intel_hpd_init(struct drm_device *dev);
2085 2193
2086extern void intel_uncore_sanitize(struct drm_device *dev); 2194extern void intel_uncore_sanitize(struct drm_device *dev);
2087extern void intel_uncore_early_sanitize(struct drm_device *dev); 2195extern void intel_uncore_early_sanitize(struct drm_device *dev,
2196 bool restore_forcewake);
2088extern void intel_uncore_init(struct drm_device *dev); 2197extern void intel_uncore_init(struct drm_device *dev);
2089extern void intel_uncore_check_errors(struct drm_device *dev); 2198extern void intel_uncore_check_errors(struct drm_device *dev);
2090extern void intel_uncore_fini(struct drm_device *dev); 2199extern void intel_uncore_fini(struct drm_device *dev);
2200extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
2091 2201
2092void 2202void
2093i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2203i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2235,6 +2345,8 @@ bool i915_gem_retire_requests(struct drm_device *dev);
2235void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); 2345void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
2236int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, 2346int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
2237 bool interruptible); 2347 bool interruptible);
2348int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
2349
2238static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 2350static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
2239{ 2351{
2240 return unlikely(atomic_read(&error->reset_counter) 2352 return unlikely(atomic_read(&error->reset_counter)
@@ -2404,7 +2516,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx)
2404 2516
2405static inline bool i915_gem_context_is_default(const struct intel_context *c) 2517static inline bool i915_gem_context_is_default(const struct intel_context *c)
2406{ 2518{
2407 return c->id == DEFAULT_CONTEXT_ID; 2519 return c->user_handle == DEFAULT_CONTEXT_HANDLE;
2408} 2520}
2409 2521
2410int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 2522int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2435,7 +2547,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
2435 2547
2436/* i915_gem_stolen.c */ 2548/* i915_gem_stolen.c */
2437int i915_gem_init_stolen(struct drm_device *dev); 2549int i915_gem_init_stolen(struct drm_device *dev);
2438int i915_gem_stolen_setup_compression(struct drm_device *dev, int size); 2550int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
2439void i915_gem_stolen_cleanup_compression(struct drm_device *dev); 2551void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
2440void i915_gem_cleanup_stolen(struct drm_device *dev); 2552void i915_gem_cleanup_stolen(struct drm_device *dev);
2441struct drm_i915_gem_object * 2553struct drm_i915_gem_object *
@@ -2445,7 +2557,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
2445 u32 stolen_offset, 2557 u32 stolen_offset,
2446 u32 gtt_offset, 2558 u32 gtt_offset,
2447 u32 size); 2559 u32 size);
2448void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
2449 2560
2450/* i915_gem_tiling.c */ 2561/* i915_gem_tiling.c */
2451static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 2562static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -2593,8 +2704,8 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
2593extern void intel_init_pch_refclk(struct drm_device *dev); 2704extern void intel_init_pch_refclk(struct drm_device *dev);
2594extern void gen6_set_rps(struct drm_device *dev, u8 val); 2705extern void gen6_set_rps(struct drm_device *dev, u8 val);
2595extern void valleyview_set_rps(struct drm_device *dev, u8 val); 2706extern void valleyview_set_rps(struct drm_device *dev, u8 val);
2596extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv); 2707extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
2597extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv); 2708 bool enable);
2598extern void intel_detect_pch(struct drm_device *dev); 2709extern void intel_detect_pch(struct drm_device *dev);
2599extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 2710extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
2600extern int intel_enable_rc6(const struct drm_device *dev); 2711extern int intel_enable_rc6(const struct drm_device *dev);
@@ -2605,6 +2716,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
2605int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, 2716int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
2606 struct drm_file *file); 2717 struct drm_file *file);
2607 2718
2719void intel_notify_mmio_flip(struct intel_engine_cs *ring);
2720
2608/* overlay */ 2721/* overlay */
2609extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 2722extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
2610extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 2723extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@ -2700,10 +2813,10 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
2700 2813
2701static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) 2814static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
2702{ 2815{
2703 if (HAS_PCH_SPLIT(dev)) 2816 if (IS_VALLEYVIEW(dev))
2704 return CPU_VGACNTRL;
2705 else if (IS_VALLEYVIEW(dev))
2706 return VLV_VGACNTRL; 2817 return VLV_VGACNTRL;
2818 else if (INTEL_INFO(dev)->gen >= 5)
2819 return CPU_VGACNTRL;
2707 else 2820 else
2708 return VGACNTRL; 2821 return VGACNTRL;
2709} 2822}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f247d922e44a..215185050ff1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
1095 * Compare seqno against outstanding lazy request. Emit a request if they are 1095 * Compare seqno against outstanding lazy request. Emit a request if they are
1096 * equal. 1096 * equal.
1097 */ 1097 */
1098static int 1098int
1099i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno) 1099i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
1100{ 1100{
1101 int ret; 1101 int ret;
@@ -1161,14 +1161,14 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1161 s64 before, now; 1161 s64 before, now;
1162 int ret; 1162 int ret;
1163 1163
1164 WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n"); 1164 WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1165 1165
1166 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) 1166 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1167 return 0; 1167 return 0;
1168 1168
1169 timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0; 1169 timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
1170 1170
1171 if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) { 1171 if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
1172 gen6_rps_boost(dev_priv); 1172 gen6_rps_boost(dev_priv);
1173 if (file_priv) 1173 if (file_priv)
1174 mod_delayed_work(dev_priv->wq, 1174 mod_delayed_work(dev_priv->wq,
@@ -1560,14 +1560,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1560 if (ret) 1560 if (ret)
1561 goto unpin; 1561 goto unpin;
1562 1562
1563 obj->fault_mappable = true; 1563 /* Finally, remap it using the new GTT offset */
1564
1565 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); 1564 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1566 pfn >>= PAGE_SHIFT; 1565 pfn >>= PAGE_SHIFT;
1567 pfn += page_offset;
1568 1566
1569 /* Finally, remap it using the new GTT offset */ 1567 if (!obj->fault_mappable) {
1570 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1568 unsigned long size = min_t(unsigned long,
1569 vma->vm_end - vma->vm_start,
1570 obj->base.size);
1571 int i;
1572
1573 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1574 ret = vm_insert_pfn(vma,
1575 (unsigned long)vma->vm_start + i * PAGE_SIZE,
1576 pfn + i);
1577 if (ret)
1578 break;
1579 }
1580
1581 obj->fault_mappable = true;
1582 } else
1583 ret = vm_insert_pfn(vma,
1584 (unsigned long)vmf->virtual_address,
1585 pfn + page_offset);
1571unpin: 1586unpin:
1572 i915_gem_object_ggtt_unpin(obj); 1587 i915_gem_object_ggtt_unpin(obj);
1573unlock: 1588unlock:
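The reworked fault handler above pre-populates the whole object on the first fault instead of a single page, with the loop bound by whichever is smaller: the vma length or the object size. The page-count arithmetic in isolation, assuming 4 KiB pages (min_ul is a local stand-in for min_t):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* How many pfns the first-fault loop would insert (illustrative). */
static unsigned long prefault_pages(unsigned long vma_len,
				    unsigned long obj_size)
{
	return min_ul(vma_len, obj_size) >> PAGE_SHIFT;
}

int main(void)
{
	/* e.g. a 64 KiB object mapped through a 1 MiB vma: 16 pages */
	printf("%lu pages\n", prefault_pages(1UL << 20, 64 * 1024));
	return 0;
}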
@@ -2051,16 +2066,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2051 * our own buffer, now let the real VM do its job and 2066 * our own buffer, now let the real VM do its job and
2052 * go down in flames if truly OOM. 2067 * go down in flames if truly OOM.
2053 */ 2068 */
2054 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
2055 gfp |= __GFP_IO | __GFP_WAIT;
2056
2057 i915_gem_shrink_all(dev_priv); 2069 i915_gem_shrink_all(dev_priv);
2058 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2070 page = shmem_read_mapping_page(mapping, i);
2059 if (IS_ERR(page)) 2071 if (IS_ERR(page))
2060 goto err_pages; 2072 goto err_pages;
2061
2062 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
2063 gfp &= ~(__GFP_IO | __GFP_WAIT);
2064 } 2073 }
2065#ifdef CONFIG_SWIOTLB 2074#ifdef CONFIG_SWIOTLB
2066 if (swiotlb_nr_tbl()) { 2075 if (swiotlb_nr_tbl()) {
@@ -2209,6 +2218,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2209 list_move_tail(&vma->mm_list, &vm->inactive_list); 2218 list_move_tail(&vma->mm_list, &vm->inactive_list);
2210 } 2219 }
2211 2220
2221 intel_fb_obj_flush(obj, true);
2222
2212 list_del_init(&obj->ring_list); 2223 list_del_init(&obj->ring_list);
2213 obj->ring = NULL; 2224 obj->ring = NULL;
2214 2225
@@ -2318,7 +2329,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
2318 u32 request_ring_position, request_start; 2329 u32 request_ring_position, request_start;
2319 int ret; 2330 int ret;
2320 2331
2321 request_start = intel_ring_get_tail(ring); 2332 request_start = intel_ring_get_tail(ring->buffer);
2322 /* 2333 /*
2323 * Emit any outstanding flushes - execbuf can fail to emit the flush 2334 * Emit any outstanding flushes - execbuf can fail to emit the flush
2324 * after having emitted the batchbuffer command. Hence we need to fix 2335 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2339,7 +2350,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
2339 * GPU processing the request, we never over-estimate the 2350 * GPU processing the request, we never over-estimate the
2340 * position of the head. 2351 * position of the head.
2341 */ 2352 */
2342 request_ring_position = intel_ring_get_tail(ring); 2353 request_ring_position = intel_ring_get_tail(ring->buffer);
2343 2354
2344 ret = ring->add_request(ring); 2355 ret = ring->add_request(ring);
2345 if (ret) 2356 if (ret)
@@ -2822,6 +2833,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
2822 idx = intel_ring_sync_index(from, to); 2833 idx = intel_ring_sync_index(from, to);
2823 2834
2824 seqno = obj->last_read_seqno; 2835 seqno = obj->last_read_seqno;
2836 /* Optimization: Avoid semaphore sync when we are sure we already
2837 * waited for an object with a higher seqno */
2825 if (seqno <= from->semaphore.sync_seqno[idx]) 2838 if (seqno <= from->semaphore.sync_seqno[idx])
2826 return 0; 2839 return 0;
2827 2840
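The added comment describes a monotonic early-out: per pairing index the ring records the last seqno it synced to, and the semaphore wait is skipped when the request's seqno is not newer. A toy model of that bookkeeping (a plain <= here; the real driver also has to cope with seqno wraparound):

#include <stdio.h>
#include <stdint.h>

static uint32_t sync_seqno[4]; /* last seqno synced to, per pairing index */

static int need_semaphore(int idx, uint32_t seqno)
{
	if (seqno <= sync_seqno[idx])
		return 0;          /* already waited for something newer */
	sync_seqno[idx] = seqno;   /* the real code updates this after the wait */
	return 1;
}

int main(void)
{
	printf("%d\n", need_semaphore(0, 100)); /* 1: must sync */
	printf("%d\n", need_semaphore(0, 90));  /* 0: covered by the 100 wait */
	return 0;
}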
@@ -2905,8 +2918,6 @@ int i915_vma_unbind(struct i915_vma *vma)
2905 2918
2906 vma->unbind_vma(vma); 2919 vma->unbind_vma(vma);
2907 2920
2908 i915_gem_gtt_finish_object(obj);
2909
2910 list_del_init(&vma->mm_list); 2921 list_del_init(&vma->mm_list);
2911 /* Avoid an unnecessary call to unbind on rebind. */ 2922 /* Avoid an unnecessary call to unbind on rebind. */
2912 if (i915_is_ggtt(vma->vm)) 2923 if (i915_is_ggtt(vma->vm))
@@ -2917,8 +2928,10 @@ int i915_vma_unbind(struct i915_vma *vma)
2917 2928
2918 /* Since the unbound list is global, only move to that list if 2929 /* Since the unbound list is global, only move to that list if
2919 * no more VMAs exist. */ 2930 * no more VMAs exist. */
2920 if (list_empty(&obj->vma_list)) 2931 if (list_empty(&obj->vma_list)) {
2932 i915_gem_gtt_finish_object(obj);
2921 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); 2933 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2934 }
2922 2935
2923 /* And finally now the object is completely decoupled from this vma, 2936 /* And finally now the object is completely decoupled from this vma,
2924 * we can drop its hold on the backing storage and allow it to be 2937 * we can drop its hold on the backing storage and allow it to be
@@ -3530,6 +3543,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3530 old_write_domain = obj->base.write_domain; 3543 old_write_domain = obj->base.write_domain;
3531 obj->base.write_domain = 0; 3544 obj->base.write_domain = 0;
3532 3545
3546 intel_fb_obj_flush(obj, false);
3547
3533 trace_i915_gem_object_change_domain(obj, 3548 trace_i915_gem_object_change_domain(obj,
3534 obj->base.read_domains, 3549 obj->base.read_domains,
3535 old_write_domain); 3550 old_write_domain);
@@ -3551,6 +3566,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3551 old_write_domain = obj->base.write_domain; 3566 old_write_domain = obj->base.write_domain;
3552 obj->base.write_domain = 0; 3567 obj->base.write_domain = 0;
3553 3568
3569 intel_fb_obj_flush(obj, false);
3570
3554 trace_i915_gem_object_change_domain(obj, 3571 trace_i915_gem_object_change_domain(obj,
3555 obj->base.read_domains, 3572 obj->base.read_domains,
3556 old_write_domain); 3573 old_write_domain);
@@ -3604,6 +3621,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3604 obj->dirty = 1; 3621 obj->dirty = 1;
3605 } 3622 }
3606 3623
3624 if (write)
3625 intel_fb_obj_invalidate(obj, NULL);
3626
3607 trace_i915_gem_object_change_domain(obj, 3627 trace_i915_gem_object_change_domain(obj,
3608 old_read_domains, 3628 old_read_domains,
3609 old_write_domain); 3629 old_write_domain);
@@ -3940,6 +3960,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3940 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3960 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3941 } 3961 }
3942 3962
3963 if (write)
3964 intel_fb_obj_invalidate(obj, NULL);
3965
3943 trace_i915_gem_object_change_domain(obj, 3966 trace_i915_gem_object_change_domain(obj,
3944 old_read_domains, 3967 old_read_domains,
3945 old_write_domain); 3968 old_write_domain);
@@ -4428,13 +4451,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4428 if (obj->stolen) 4451 if (obj->stolen)
4429 i915_gem_object_unpin_pages(obj); 4452 i915_gem_object_unpin_pages(obj);
4430 4453
4454 WARN_ON(obj->frontbuffer_bits);
4455
4431 if (WARN_ON(obj->pages_pin_count)) 4456 if (WARN_ON(obj->pages_pin_count))
4432 obj->pages_pin_count = 0; 4457 obj->pages_pin_count = 0;
4433 if (discard_backing_storage(obj)) 4458 if (discard_backing_storage(obj))
4434 obj->madv = I915_MADV_DONTNEED; 4459 obj->madv = I915_MADV_DONTNEED;
4435 i915_gem_object_put_pages(obj); 4460 i915_gem_object_put_pages(obj);
4436 i915_gem_object_free_mmap_offset(obj); 4461 i915_gem_object_free_mmap_offset(obj);
4437 i915_gem_object_release_stolen(obj);
4438 4462
4439 BUG_ON(obj->pages); 4463 BUG_ON(obj->pages);
4440 4464
@@ -4912,6 +4936,8 @@ i915_gem_load(struct drm_device *dev)
4912 4936
4913 dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; 4937 dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
4914 register_oom_notifier(&dev_priv->mm.oom_notifier); 4938 register_oom_notifier(&dev_priv->mm.oom_notifier);
4939
4940 mutex_init(&dev_priv->fb_tracking.lock);
4915} 4941}
4916 4942
4917void i915_gem_release(struct drm_device *dev, struct drm_file *file) 4943void i915_gem_release(struct drm_device *dev, struct drm_file *file)
@@ -4973,6 +4999,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4973 return ret; 4999 return ret;
4974} 5000}
4975 5001
5002void i915_gem_track_fb(struct drm_i915_gem_object *old,
5003 struct drm_i915_gem_object *new,
5004 unsigned frontbuffer_bits)
5005{
5006 if (old) {
5007 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5008 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5009 old->frontbuffer_bits &= ~frontbuffer_bits;
5010 }
5011
5012 if (new) {
5013 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5014 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5015 new->frontbuffer_bits |= frontbuffer_bits;
5016 }
5017}
5018
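The handover in i915_gem_track_fb is easiest to see with two dummy objects: the bits leave the old bo and land on the new one, and the WARN_ONs encode the invariant that each frontbuffer bit is owned by exactly one object at a time. A compilable toy version, with asserts in place of WARN_ON:

#include <assert.h>
#include <stdio.h>

struct fake_bo {
	unsigned int frontbuffer_bits;
};

static void track_fb(struct fake_bo *old_bo, struct fake_bo *new_bo,
		     unsigned int frontbuffer_bits)
{
	if (old_bo) {
		/* The old object must currently own these bits. */
		assert(old_bo->frontbuffer_bits & frontbuffer_bits);
		old_bo->frontbuffer_bits &= ~frontbuffer_bits;
	}
	if (new_bo) {
		/* The new object must not own them yet. */
		assert(!(new_bo->frontbuffer_bits & frontbuffer_bits));
		new_bo->frontbuffer_bits |= frontbuffer_bits;
	}
}

int main(void)
{
	struct fake_bo a = { 0x1 }, b = { 0 };

	track_fb(&a, &b, 0x1); /* e.g. flip pipe A primary from a to b */
	printf("a=0x%x b=0x%x\n", a.frontbuffer_bits, b.frontbuffer_bits);
	return 0;
}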
4976static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) 5019static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4977{ 5020{
4978 if (!mutex_is_locked(mutex)) 5021 if (!mutex_is_locked(mutex))
@@ -5055,12 +5098,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5055 vm == &dev_priv->mm.aliasing_ppgtt->base) 5098 vm == &dev_priv->mm.aliasing_ppgtt->base)
5056 vm = &dev_priv->gtt.base; 5099 vm = &dev_priv->gtt.base;
5057 5100
5058 BUG_ON(list_empty(&o->vma_list));
5059 list_for_each_entry(vma, &o->vma_list, vma_link) { 5101 list_for_each_entry(vma, &o->vma_list, vma_link) {
5060 if (vma->vm == vm) 5102 if (vma->vm == vm)
5061 return vma->node.start; 5103 return vma->node.start;
5062 5104
5063 } 5105 }
5106 WARN(1, "%s vma for this object not found.\n",
5107 i915_is_ggtt(vm) ? "global" : "ppgtt");
5064 return -1; 5108 return -1;
5065} 5109}
5066 5110
@@ -5141,8 +5185,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
5141 bool was_interruptible; 5185 bool was_interruptible;
5142 bool unlock; 5186 bool unlock;
5143 5187
5144 while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) 5188 while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
5145 schedule_timeout_killable(1); 5189 schedule_timeout_killable(1);
5190 if (fatal_signal_pending(current))
5191 return NOTIFY_DONE;
5192 }
5146 if (timeout == 0) { 5193 if (timeout == 0) {
5147 pr_err("Unable to purge GPU memory due to lock contention.\n"); 5194 pr_err("Unable to purge GPU memory due to lock contention.\n");
5148 return NOTIFY_DONE; 5195 return NOTIFY_DONE;
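The shrinker-oom change bounds the contention spin in two independent ways: the pre-existing timeout countdown and the new fatal-signal bail-out. Reduced to a runnable stub (try_lock_stub and fatal_signal_stub are stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

static bool try_lock_stub(void) { return false; }             /* always contended */
static bool fatal_signal_stub(int tick) { return tick == 3; } /* fake signal */

int main(void)
{
	int timeout = 5000, tick = 0;

	while (!try_lock_stub() && --timeout) {
		if (fatal_signal_stub(tick++)) {
			puts("fatal signal pending: bail out early");
			return 0;
		}
	}
	if (timeout == 0)
		puts("gave up: lock contention");
	return 0;
}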
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5ddf3bce9c3..3b99390e467a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -182,22 +182,50 @@ void i915_gem_context_free(struct kref *ctx_ref)
182 typeof(*ctx), ref); 182 typeof(*ctx), ref);
183 struct i915_hw_ppgtt *ppgtt = NULL; 183 struct i915_hw_ppgtt *ppgtt = NULL;
184 184
185 if (ctx->obj) { 185 if (ctx->legacy_hw_ctx.rcs_state) {
186 /* We refcount even the aliasing PPGTT to keep the code symmetric */ 186 /* We refcount even the aliasing PPGTT to keep the code symmetric */
187 if (USES_PPGTT(ctx->obj->base.dev)) 187 if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
188 ppgtt = ctx_to_ppgtt(ctx); 188 ppgtt = ctx_to_ppgtt(ctx);
189
190 /* XXX: Free up the object before tearing down the address space, in
191 * case we're bound in the PPGTT */
192 drm_gem_object_unreference(&ctx->obj->base);
193 } 189 }
194 190
195 if (ppgtt) 191 if (ppgtt)
196 kref_put(&ppgtt->ref, ppgtt_release); 192 kref_put(&ppgtt->ref, ppgtt_release);
193 if (ctx->legacy_hw_ctx.rcs_state)
194 drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
197 list_del(&ctx->link); 195 list_del(&ctx->link);
198 kfree(ctx); 196 kfree(ctx);
199} 197}
200 198
199static struct drm_i915_gem_object *
200i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
201{
202 struct drm_i915_gem_object *obj;
203 int ret;
204
205 obj = i915_gem_alloc_object(dev, size);
206 if (obj == NULL)
207 return ERR_PTR(-ENOMEM);
208
209 /*
210 * Try to make the context utilize L3 as well as LLC.
211 *
212 * On VLV we don't have L3 controls in the PTEs so we
213 * shouldn't touch the cache level, especially as that
214 * would make the object snooped which might have a
215 * negative performance impact.
216 */
217 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
218 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
219 /* Failure shouldn't ever happen this early */
220 if (WARN_ON(ret)) {
221 drm_gem_object_unreference(&obj->base);
222 return ERR_PTR(ret);
223 }
224 }
225
226 return obj;
227}
228
201static struct i915_hw_ppgtt * 229static struct i915_hw_ppgtt *
202create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx) 230create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
203{ 231{
@@ -234,40 +262,26 @@ __create_hw_context(struct drm_device *dev,
234 list_add_tail(&ctx->link, &dev_priv->context_list); 262 list_add_tail(&ctx->link, &dev_priv->context_list);
235 263
236 if (dev_priv->hw_context_size) { 264 if (dev_priv->hw_context_size) {
237 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); 265 struct drm_i915_gem_object *obj =
238 if (ctx->obj == NULL) { 266 i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
239 ret = -ENOMEM; 267 if (IS_ERR(obj)) {
268 ret = PTR_ERR(obj);
240 goto err_out; 269 goto err_out;
241 } 270 }
242 271 ctx->legacy_hw_ctx.rcs_state = obj;
243 /*
244 * Try to make the context utilize L3 as well as LLC.
245 *
246 * On VLV we don't have L3 controls in the PTEs so we
247 * shouldn't touch the cache level, especially as that
248 * would make the object snooped which might have a
249 * negative performance impact.
250 */
251 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
252 ret = i915_gem_object_set_cache_level(ctx->obj,
253 I915_CACHE_L3_LLC);
254 /* Failure shouldn't ever happen this early */
255 if (WARN_ON(ret))
256 goto err_out;
257 }
258 } 272 }
259 273
260 /* Default context will never have a file_priv */ 274 /* Default context will never have a file_priv */
261 if (file_priv != NULL) { 275 if (file_priv != NULL) {
262 ret = idr_alloc(&file_priv->context_idr, ctx, 276 ret = idr_alloc(&file_priv->context_idr, ctx,
263 DEFAULT_CONTEXT_ID, 0, GFP_KERNEL); 277 DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
264 if (ret < 0) 278 if (ret < 0)
265 goto err_out; 279 goto err_out;
266 } else 280 } else
267 ret = DEFAULT_CONTEXT_ID; 281 ret = DEFAULT_CONTEXT_HANDLE;
268 282
269 ctx->file_priv = file_priv; 283 ctx->file_priv = file_priv;
270 ctx->id = ret; 284 ctx->user_handle = ret;
271 /* NB: Mark all slices as needing a remap so that when the context first 285 /* NB: Mark all slices as needing a remap so that when the context first
272 * loads it will restore whatever remap state already exists. If there 286 * loads it will restore whatever remap state already exists. If there
273 * is no remap info, it will be a NOP. */ 287 * is no remap info, it will be a NOP. */
@@ -301,7 +315,7 @@ i915_gem_create_context(struct drm_device *dev,
301 if (IS_ERR(ctx)) 315 if (IS_ERR(ctx))
302 return ctx; 316 return ctx;
303 317
304 if (is_global_default_ctx && ctx->obj) { 318 if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
305 /* We may need to do things with the shrinker which 319 /* We may need to do things with the shrinker which
306 * require us to immediately switch back to the default 320 * require us to immediately switch back to the default
307 * context. This can cause a problem as pinning the 321 * context. This can cause a problem as pinning the
@@ -309,7 +323,7 @@ i915_gem_create_context(struct drm_device *dev,
309 * be available. To avoid this we always pin the default 323 * be available. To avoid this we always pin the default
310 * context. 324 * context.
311 */ 325 */
312 ret = i915_gem_obj_ggtt_pin(ctx->obj, 326 ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
313 get_context_alignment(dev), 0); 327 get_context_alignment(dev), 0);
314 if (ret) { 328 if (ret) {
315 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); 329 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +363,8 @@ i915_gem_create_context(struct drm_device *dev,
349 return ctx; 363 return ctx;
350 364
351err_unpin: 365err_unpin:
352 if (is_global_default_ctx && ctx->obj) 366 if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
353 i915_gem_object_ggtt_unpin(ctx->obj); 367 i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
354err_destroy: 368err_destroy:
355 i915_gem_context_unreference(ctx); 369 i915_gem_context_unreference(ctx);
356 return ERR_PTR(ret); 370 return ERR_PTR(ret);
@@ -366,23 +380,27 @@ void i915_gem_context_reset(struct drm_device *dev)
366 for (i = 0; i < I915_NUM_RINGS; i++) { 380 for (i = 0; i < I915_NUM_RINGS; i++) {
367 struct intel_engine_cs *ring = &dev_priv->ring[i]; 381 struct intel_engine_cs *ring = &dev_priv->ring[i];
368 struct intel_context *dctx = ring->default_context; 382 struct intel_context *dctx = ring->default_context;
383 struct intel_context *lctx = ring->last_context;
369 384
370 /* Do a fake switch to the default context */ 385 /* Do a fake switch to the default context */
371 if (ring->last_context == dctx) 386 if (lctx == dctx)
372 continue; 387 continue;
373 388
374 if (!ring->last_context) 389 if (!lctx)
375 continue; 390 continue;
376 391
377 if (dctx->obj && i == RCS) { 392 if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
378 WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj, 393 WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
379 get_context_alignment(dev), 0)); 394 get_context_alignment(dev), 0));
380 /* Fake a finish/inactive */ 395 /* Fake a finish/inactive */
381 dctx->obj->base.write_domain = 0; 396 dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
382 dctx->obj->active = 0; 397 dctx->legacy_hw_ctx.rcs_state->active = 0;
383 } 398 }
384 399
385 i915_gem_context_unreference(ring->last_context); 400 if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
401 i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
402
403 i915_gem_context_unreference(lctx);
386 i915_gem_context_reference(dctx); 404 i915_gem_context_reference(dctx);
387 ring->last_context = dctx; 405 ring->last_context = dctx;
388 } 406 }
@@ -429,7 +447,7 @@ void i915_gem_context_fini(struct drm_device *dev)
429 struct intel_context *dctx = dev_priv->ring[RCS].default_context; 447 struct intel_context *dctx = dev_priv->ring[RCS].default_context;
430 int i; 448 int i;
431 449
432 if (dctx->obj) { 450 if (dctx->legacy_hw_ctx.rcs_state) {
433 /* The only known way to stop the gpu from accessing the hw context is 451 /* The only known way to stop the gpu from accessing the hw context is
434 * to reset it. Do this as the very last operation to avoid confusing 452 * to reset it. Do this as the very last operation to avoid confusing
435 * other code, leading to spurious errors. */ 453 * other code, leading to spurious errors. */
@@ -444,13 +462,13 @@ void i915_gem_context_fini(struct drm_device *dev)
444 WARN_ON(!dev_priv->ring[RCS].last_context); 462 WARN_ON(!dev_priv->ring[RCS].last_context);
445 if (dev_priv->ring[RCS].last_context == dctx) { 463 if (dev_priv->ring[RCS].last_context == dctx) {
446 /* Fake switch to NULL context */ 464 /* Fake switch to NULL context */
447 WARN_ON(dctx->obj->active); 465 WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
448 i915_gem_object_ggtt_unpin(dctx->obj); 466 i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
449 i915_gem_context_unreference(dctx); 467 i915_gem_context_unreference(dctx);
450 dev_priv->ring[RCS].last_context = NULL; 468 dev_priv->ring[RCS].last_context = NULL;
451 } 469 }
452 470
453 i915_gem_object_ggtt_unpin(dctx->obj); 471 i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
454 } 472 }
455 473
456 for (i = 0; i < I915_NUM_RINGS; i++) { 474 for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -570,7 +588,7 @@ mi_set_context(struct intel_engine_cs *ring,
570 588
571 intel_ring_emit(ring, MI_NOOP); 589 intel_ring_emit(ring, MI_NOOP);
572 intel_ring_emit(ring, MI_SET_CONTEXT); 590 intel_ring_emit(ring, MI_SET_CONTEXT);
573 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) | 591 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
574 MI_MM_SPACE_GTT | 592 MI_MM_SPACE_GTT |
575 MI_SAVE_EXT_STATE_EN | 593 MI_SAVE_EXT_STATE_EN |
576 MI_RESTORE_EXT_STATE_EN | 594 MI_RESTORE_EXT_STATE_EN |
@@ -602,16 +620,16 @@ static int do_switch(struct intel_engine_cs *ring,
602 int ret, i; 620 int ret, i;
603 621
604 if (from != NULL && ring == &dev_priv->ring[RCS]) { 622 if (from != NULL && ring == &dev_priv->ring[RCS]) {
605 BUG_ON(from->obj == NULL); 623 BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
606 BUG_ON(!i915_gem_obj_is_pinned(from->obj)); 624 BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
607 } 625 }
608 626
609 if (from == to && from->last_ring == ring && !to->remap_slice) 627 if (from == to && !to->remap_slice)
610 return 0; 628 return 0;
611 629
612 /* Trying to pin first makes error handling easier. */ 630 /* Trying to pin first makes error handling easier. */
613 if (ring == &dev_priv->ring[RCS]) { 631 if (ring == &dev_priv->ring[RCS]) {
614 ret = i915_gem_obj_ggtt_pin(to->obj, 632 ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
615 get_context_alignment(ring->dev), 0); 633 get_context_alignment(ring->dev), 0);
616 if (ret) 634 if (ret)
617 return ret; 635 return ret;
@@ -644,17 +662,17 @@ static int do_switch(struct intel_engine_cs *ring,
644 * 662 *
645 * XXX: We need a real interface to do this instead of trickery. 663 * XXX: We need a real interface to do this instead of trickery.
646 */ 664 */
647 ret = i915_gem_object_set_to_gtt_domain(to->obj, false); 665 ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
648 if (ret) 666 if (ret)
649 goto unpin_out; 667 goto unpin_out;
650 668
651 if (!to->obj->has_global_gtt_mapping) { 669 if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
652 struct i915_vma *vma = i915_gem_obj_to_vma(to->obj, 670 struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
653 &dev_priv->gtt.base); 671 &dev_priv->gtt.base);
654 vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND); 672 vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
655 } 673 }
656 674
657 if (!to->is_initialized || i915_gem_context_is_default(to)) 675 if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
658 hw_flags |= MI_RESTORE_INHIBIT; 676 hw_flags |= MI_RESTORE_INHIBIT;
659 677
660 ret = mi_set_context(ring, to, hw_flags); 678 ret = mi_set_context(ring, to, hw_flags);
@@ -680,8 +698,8 @@ static int do_switch(struct intel_engine_cs *ring,
680 * MI_SET_CONTEXT instead of when the next seqno has completed. 698 * MI_SET_CONTEXT instead of when the next seqno has completed.
681 */ 699 */
682 if (from != NULL) { 700 if (from != NULL) {
683 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 701 from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
684 i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring); 702 i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
685 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 703 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
686 * whole damn pipeline, we don't need to explicitly mark the 704 * whole damn pipeline, we don't need to explicitly mark the
687 * object dirty. The only exception is that the context must be 705 * object dirty. The only exception is that the context must be
@@ -689,21 +707,20 @@ static int do_switch(struct intel_engine_cs *ring,
689 * able to defer doing this until we know the object would be 707 * able to defer doing this until we know the object would be
690 * swapped, but there is no way to do that yet. 708 * swapped, but there is no way to do that yet.
691 */ 709 */
692 from->obj->dirty = 1; 710 from->legacy_hw_ctx.rcs_state->dirty = 1;
693 BUG_ON(from->obj->ring != ring); 711 BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
694 712
695 /* obj is kept alive until the next request by its active ref */ 713 /* obj is kept alive until the next request by its active ref */
696 i915_gem_object_ggtt_unpin(from->obj); 714 i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
697 i915_gem_context_unreference(from); 715 i915_gem_context_unreference(from);
698 } 716 }
699 717
700 uninitialized = !to->is_initialized && from == NULL; 718 uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
701 to->is_initialized = true; 719 to->legacy_hw_ctx.initialized = true;
702 720
703done: 721done:
704 i915_gem_context_reference(to); 722 i915_gem_context_reference(to);
705 ring->last_context = to; 723 ring->last_context = to;
706 to->last_ring = ring;
707 724
708 if (uninitialized) { 725 if (uninitialized) {
709 ret = i915_gem_render_state_init(ring); 726 ret = i915_gem_render_state_init(ring);
@@ -715,7 +732,7 @@ done:
715 732
716unpin_out: 733unpin_out:
717 if (ring->id == RCS) 734 if (ring->id == RCS)
718 i915_gem_object_ggtt_unpin(to->obj); 735 i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
719 return ret; 736 return ret;
720} 737}
721 738
@@ -736,7 +753,7 @@ int i915_switch_context(struct intel_engine_cs *ring,
736 753
737 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 754 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
738 755
739 if (to->obj == NULL) { /* We have the fake context */ 756 if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
740 if (to != ring->last_context) { 757 if (to != ring->last_context) {
741 i915_gem_context_reference(to); 758 i915_gem_context_reference(to);
742 if (ring->last_context) 759 if (ring->last_context)
@@ -774,7 +791,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
774 if (IS_ERR(ctx)) 791 if (IS_ERR(ctx))
775 return PTR_ERR(ctx); 792 return PTR_ERR(ctx);
776 793
777 args->ctx_id = ctx->id; 794 args->ctx_id = ctx->user_handle;
778 DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); 795 DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
779 796
780 return 0; 797 return 0;
@@ -788,7 +805,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
788 struct intel_context *ctx; 805 struct intel_context *ctx;
789 int ret; 806 int ret;
790 807
791 if (args->ctx_id == DEFAULT_CONTEXT_ID) 808 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
792 return -ENOENT; 809 return -ENOENT;
793 810
794 ret = i915_mutex_lock_interruptible(dev); 811 ret = i915_mutex_lock_interruptible(dev);
@@ -801,7 +818,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
801 return PTR_ERR(ctx); 818 return PTR_ERR(ctx);
802 } 819 }
803 820
804 idr_remove(&ctx->file_priv->context_idr, ctx->id); 821 idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
805 i915_gem_context_unreference(ctx); 822 i915_gem_context_unreference(ctx);
806 mutex_unlock(&dev->struct_mutex); 823 mutex_unlock(&dev->struct_mutex);
807 824
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a30133f93e8..60998fc4e5b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
938 struct intel_context *ctx = NULL; 938 struct intel_context *ctx = NULL;
939 struct i915_ctx_hang_stats *hs; 939 struct i915_ctx_hang_stats *hs;
940 940
941 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID) 941 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
942 return ERR_PTR(-EINVAL); 942 return ERR_PTR(-EINVAL);
943 943
944 ctx = i915_gem_context_get(file->driver_priv, ctx_id); 944 ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
975 if (obj->base.write_domain) { 975 if (obj->base.write_domain) {
976 obj->dirty = 1; 976 obj->dirty = 1;
977 obj->last_write_seqno = intel_ring_get_seqno(ring); 977 obj->last_write_seqno = intel_ring_get_seqno(ring);
978 /* check for potential scanout */ 978
979 if (i915_gem_obj_ggtt_bound(obj) && 979 intel_fb_obj_invalidate(obj, ring);
980 i915_gem_obj_to_ggtt(obj)->pin_count)
981 intel_mark_fb_busy(obj, ring);
982 980
983 /* update for the implicit flush after a batch */ 981 /* update for the implicit flush after a batch */
984 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; 982 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
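[ Ed. note: the hunk above drops the ad-hoc "is this pinned into the GGTT,
  i.e. a potential scanout?" heuristic in favour of the frontbuffer-tracking
  hook: any GPU write to an object now invalidates its frontbuffer state.
  A minimal caller-side sketch, assuming the two-argument form the diff uses: ]

	if (obj->base.write_domain) {
		obj->dirty = 1;
		/* tell frontbuffer tracking the contents are now stale */
		intel_fb_obj_invalidate(obj, ring);
	}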
@@ -1028,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1028 return 0; 1026 return 0;
1029} 1027}
1030 1028
1029static int
1030legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1031 struct intel_engine_cs *ring,
1032 struct intel_context *ctx,
1033 struct drm_i915_gem_execbuffer2 *args,
1034 struct list_head *vmas,
1035 struct drm_i915_gem_object *batch_obj,
1036 u64 exec_start, u32 flags)
1037{
1038 struct drm_clip_rect *cliprects = NULL;
1039 struct drm_i915_private *dev_priv = dev->dev_private;
1040 u64 exec_len;
1041 int instp_mode;
1042 u32 instp_mask;
1043 int i, ret = 0;
1044
1045 if (args->num_cliprects != 0) {
1046 if (ring != &dev_priv->ring[RCS]) {
1047 DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1048 return -EINVAL;
1049 }
1050
1051 if (INTEL_INFO(dev)->gen >= 5) {
1052 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1053 return -EINVAL;
1054 }
1055
1056 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1057 DRM_DEBUG("execbuf with %u cliprects\n",
1058 args->num_cliprects);
1059 return -EINVAL;
1060 }
1061
1062 cliprects = kcalloc(args->num_cliprects,
1063 sizeof(*cliprects),
1064 GFP_KERNEL);
1065 if (cliprects == NULL) {
1066 ret = -ENOMEM;
1067 goto error;
1068 }
1069
1070 if (copy_from_user(cliprects,
1071 to_user_ptr(args->cliprects_ptr),
1072 sizeof(*cliprects)*args->num_cliprects)) {
1073 ret = -EFAULT;
1074 goto error;
1075 }
1076 } else {
1077 if (args->DR4 == 0xffffffff) {
1078 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1079 args->DR4 = 0;
1080 }
1081
1082 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1083 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1084 return -EINVAL;
1085 }
1086 }
1087
1088 ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
1089 if (ret)
1090 goto error;
1091
1092 ret = i915_switch_context(ring, ctx);
1093 if (ret)
1094 goto error;
1095
1096 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1097 instp_mask = I915_EXEC_CONSTANTS_MASK;
1098 switch (instp_mode) {
1099 case I915_EXEC_CONSTANTS_REL_GENERAL:
1100 case I915_EXEC_CONSTANTS_ABSOLUTE:
1101 case I915_EXEC_CONSTANTS_REL_SURFACE:
1102 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1103 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1104 ret = -EINVAL;
1105 goto error;
1106 }
1107
1108 if (instp_mode != dev_priv->relative_constants_mode) {
1109 if (INTEL_INFO(dev)->gen < 4) {
1110 DRM_DEBUG("no rel constants on pre-gen4\n");
1111 ret = -EINVAL;
1112 goto error;
1113 }
1114
1115 if (INTEL_INFO(dev)->gen > 5 &&
1116 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1117 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1118 ret = -EINVAL;
1119 goto error;
1120 }
1121
1122 /* The HW changed the meaning on this bit on gen6 */
1123 if (INTEL_INFO(dev)->gen >= 6)
1124 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1125 }
1126 break;
1127 default:
1128 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1129 ret = -EINVAL;
1130 goto error;
1131 }
1132
1133 if (ring == &dev_priv->ring[RCS] &&
1134 instp_mode != dev_priv->relative_constants_mode) {
1135 ret = intel_ring_begin(ring, 4);
1136 if (ret)
1137 goto error;
1138
1139 intel_ring_emit(ring, MI_NOOP);
1140 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1141 intel_ring_emit(ring, INSTPM);
1142 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1143 intel_ring_advance(ring);
1144
1145 dev_priv->relative_constants_mode = instp_mode;
1146 }
1147
1148 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1149 ret = i915_reset_gen7_sol_offsets(dev, ring);
1150 if (ret)
1151 goto error;
1152 }
1153
1154 exec_len = args->batch_len;
1155 if (cliprects) {
1156 for (i = 0; i < args->num_cliprects; i++) {
1157 ret = i915_emit_box(dev, &cliprects[i],
1158 args->DR1, args->DR4);
1159 if (ret)
1160 goto error;
1161
1162 ret = ring->dispatch_execbuffer(ring,
1163 exec_start, exec_len,
1164 flags);
1165 if (ret)
1166 goto error;
1167 }
1168 } else {
1169 ret = ring->dispatch_execbuffer(ring,
1170 exec_start, exec_len,
1171 flags);
1172 if (ret)
1173 return ret;
1174 }
1175
1176 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1177
1178 i915_gem_execbuffer_move_to_active(vmas, ring);
1179 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1180
1181error:
1182 kfree(cliprects);
1183 return ret;
1184}
1185
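[ Ed. note: everything from the constants-mode check to the dispatch loop
  moved verbatim into this helper; the caller shrinks to a single call, as a
  later hunk in this file shows: ]

	ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
					   args, &eb->vmas, batch_obj,
					   exec_start, flags);
	if (ret)
		goto err;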
1031/** 1186/**
1032 * Find one BSD ring to dispatch the corresponding BSD command. 1187 * Find one BSD ring to dispatch the corresponding BSD command.
1033 * The Ring ID is returned. 1188 * The Ring ID is returned.
@@ -1087,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1087 struct drm_i915_private *dev_priv = dev->dev_private; 1242 struct drm_i915_private *dev_priv = dev->dev_private;
1088 struct eb_vmas *eb; 1243 struct eb_vmas *eb;
1089 struct drm_i915_gem_object *batch_obj; 1244 struct drm_i915_gem_object *batch_obj;
1090 struct drm_clip_rect *cliprects = NULL;
1091 struct intel_engine_cs *ring; 1245 struct intel_engine_cs *ring;
1092 struct intel_context *ctx; 1246 struct intel_context *ctx;
1093 struct i915_address_space *vm; 1247 struct i915_address_space *vm;
1094 const u32 ctx_id = i915_execbuffer2_get_context_id(*args); 1248 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1095 u64 exec_start = args->batch_start_offset, exec_len; 1249 u64 exec_start = args->batch_start_offset;
1096 u32 mask, flags; 1250 u32 flags;
1097 int ret, mode, i; 1251 int ret;
1098 bool need_relocs; 1252 bool need_relocs;
1099 1253
1100 if (!i915_gem_check_execbuffer(args)) 1254 if (!i915_gem_check_execbuffer(args))
@@ -1138,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1138 return -EINVAL; 1292 return -EINVAL;
1139 } 1293 }
1140 1294
1141 mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1142 mask = I915_EXEC_CONSTANTS_MASK;
1143 switch (mode) {
1144 case I915_EXEC_CONSTANTS_REL_GENERAL:
1145 case I915_EXEC_CONSTANTS_ABSOLUTE:
1146 case I915_EXEC_CONSTANTS_REL_SURFACE:
1147 if (mode != 0 && ring != &dev_priv->ring[RCS]) {
1148 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1149 return -EINVAL;
1150 }
1151
1152 if (mode != dev_priv->relative_constants_mode) {
1153 if (INTEL_INFO(dev)->gen < 4) {
1154 DRM_DEBUG("no rel constants on pre-gen4\n");
1155 return -EINVAL;
1156 }
1157
1158 if (INTEL_INFO(dev)->gen > 5 &&
1159 mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1160 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1161 return -EINVAL;
1162 }
1163
1164 /* The HW changed the meaning on this bit on gen6 */
1165 if (INTEL_INFO(dev)->gen >= 6)
1166 mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1167 }
1168 break;
1169 default:
1170 DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
1171 return -EINVAL;
1172 }
1173
1174 if (args->buffer_count < 1) { 1295 if (args->buffer_count < 1) {
1175 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); 1296 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1176 return -EINVAL; 1297 return -EINVAL;
1177 } 1298 }
1178 1299
1179 if (args->num_cliprects != 0) {
1180 if (ring != &dev_priv->ring[RCS]) {
1181 DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1182 return -EINVAL;
1183 }
1184
1185 if (INTEL_INFO(dev)->gen >= 5) {
1186 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1187 return -EINVAL;
1188 }
1189
1190 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1191 DRM_DEBUG("execbuf with %u cliprects\n",
1192 args->num_cliprects);
1193 return -EINVAL;
1194 }
1195
1196 cliprects = kcalloc(args->num_cliprects,
1197 sizeof(*cliprects),
1198 GFP_KERNEL);
1199 if (cliprects == NULL) {
1200 ret = -ENOMEM;
1201 goto pre_mutex_err;
1202 }
1203
1204 if (copy_from_user(cliprects,
1205 to_user_ptr(args->cliprects_ptr),
1206 sizeof(*cliprects)*args->num_cliprects)) {
1207 ret = -EFAULT;
1208 goto pre_mutex_err;
1209 }
1210 } else {
1211 if (args->DR4 == 0xffffffff) {
1212 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1213 args->DR4 = 0;
1214 }
1215
1216 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1217 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1218 return -EINVAL;
1219 }
1220 }
1221
1222 intel_runtime_pm_get(dev_priv); 1300 intel_runtime_pm_get(dev_priv);
1223 1301
1224 ret = i915_mutex_lock_interruptible(dev); 1302 ret = i915_mutex_lock_interruptible(dev);
@@ -1322,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1322 else 1400 else
1323 exec_start += i915_gem_obj_offset(batch_obj, vm); 1401 exec_start += i915_gem_obj_offset(batch_obj, vm);
1324 1402
1325 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas); 1403 ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
1404 args, &eb->vmas, batch_obj, exec_start, flags);
1326 if (ret) 1405 if (ret)
1327 goto err; 1406 goto err;
1328 1407
1329 ret = i915_switch_context(ring, ctx);
1330 if (ret)
1331 goto err;
1332
1333 if (ring == &dev_priv->ring[RCS] &&
1334 mode != dev_priv->relative_constants_mode) {
1335 ret = intel_ring_begin(ring, 4);
1336 if (ret)
1337 goto err;
1338
1339 intel_ring_emit(ring, MI_NOOP);
1340 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1341 intel_ring_emit(ring, INSTPM);
1342 intel_ring_emit(ring, mask << 16 | mode);
1343 intel_ring_advance(ring);
1344
1345 dev_priv->relative_constants_mode = mode;
1346 }
1347
1348 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1349 ret = i915_reset_gen7_sol_offsets(dev, ring);
1350 if (ret)
1351 goto err;
1352 }
1353
1354
1355 exec_len = args->batch_len;
1356 if (cliprects) {
1357 for (i = 0; i < args->num_cliprects; i++) {
1358 ret = i915_emit_box(dev, &cliprects[i],
1359 args->DR1, args->DR4);
1360 if (ret)
1361 goto err;
1362
1363 ret = ring->dispatch_execbuffer(ring,
1364 exec_start, exec_len,
1365 flags);
1366 if (ret)
1367 goto err;
1368 }
1369 } else {
1370 ret = ring->dispatch_execbuffer(ring,
1371 exec_start, exec_len,
1372 flags);
1373 if (ret)
1374 goto err;
1375 }
1376
1377 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1378
1379 i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
1380 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1381
1382err: 1408err:
1383 /* the request owns the ref now */ 1409 /* the request owns the ref now */
1384 i915_gem_context_unreference(ctx); 1410 i915_gem_context_unreference(ctx);
@@ -1387,8 +1413,6 @@ err:
1387 mutex_unlock(&dev->struct_mutex); 1413 mutex_unlock(&dev->struct_mutex);
1388 1414
1389pre_mutex_err: 1415pre_mutex_err:
1390 kfree(cliprects);
1391
1392 /* intel_gpu_busy should also get a ref, so it will free when the device 1416 /* intel_gpu_busy should also get a ref, so it will free when the device
1393 * is really idle. */ 1417 * is really idle. */
1394 intel_runtime_pm_put(dev_priv); 1418 intel_runtime_pm_put(dev_priv);
@@ -1525,7 +1549,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1525 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1549 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1526 if (!ret) { 1550 if (!ret) {
1527 /* Copy the new buffer offsets back to the user's exec list. */ 1551 /* Copy the new buffer offsets back to the user's exec list. */
1528 struct drm_i915_gem_exec_object2 *user_exec_list = 1552 struct drm_i915_gem_exec_object2 __user *user_exec_list =
1529 to_user_ptr(args->buffers_ptr); 1553 to_user_ptr(args->buffers_ptr);
1530 int i; 1554 int i;
1531 1555
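[ Ed. note: the last execbuffer hunk only adds the __user sparse annotation
  that copy-to-user pointers should carry; it changes nothing at runtime. A
  sketch of why it matters, assuming the usual copy_to_user() loop that the
  "copy the new buffer offsets back" comment refers to: ]

	struct drm_i915_gem_exec_object2 __user *user_exec_list =
		to_user_ptr(args->buffers_ptr);

	/* sparse now warns if user_exec_list is dereferenced directly;
	 * accesses must go through copy_to_user()/put_user(). */
	if (copy_to_user(&user_exec_list[i].offset,
			 &exec2_list[i].offset,
			 sizeof(user_exec_list[i].offset)))
		ret = -EFAULT;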
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8b3cde703364..5188936bca0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -63,6 +63,13 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
63 } 63 }
64#endif 64#endif
65 65
66 /* Early VLV doesn't have this */
67 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
68 dev->pdev->revision < 0xb) {
69 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
70 return 0;
71 }
72
66 return HAS_ALIASING_PPGTT(dev) ? 1 : 0; 73 return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
67} 74}
68 75
@@ -110,7 +117,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
110 117
111static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, 118static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
112 enum i915_cache_level level, 119 enum i915_cache_level level,
113 bool valid) 120 bool valid, u32 unused)
114{ 121{
115 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 122 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
116 pte |= GEN6_PTE_ADDR_ENCODE(addr); 123 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -132,7 +139,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
132 139
133static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, 140static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
134 enum i915_cache_level level, 141 enum i915_cache_level level,
135 bool valid) 142 bool valid, u32 unused)
136{ 143{
137 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 144 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
138 pte |= GEN6_PTE_ADDR_ENCODE(addr); 145 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -156,7 +163,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
156 163
157static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, 164static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
158 enum i915_cache_level level, 165 enum i915_cache_level level,
159 bool valid) 166 bool valid, u32 flags)
160{ 167{
161 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 168 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
162 pte |= GEN6_PTE_ADDR_ENCODE(addr); 169 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -164,7 +171,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
164 /* Mark the page as writeable. Other platforms don't have a 171 /* Mark the page as writeable. Other platforms don't have a
165 * setting for read-only/writable, so this matches that behavior. 172 * setting for read-only/writable, so this matches that behavior.
166 */ 173 */
167 pte |= BYT_PTE_WRITEABLE; 174 if (!(flags & PTE_READ_ONLY))
175 pte |= BYT_PTE_WRITEABLE;
168 176
169 if (level != I915_CACHE_NONE) 177 if (level != I915_CACHE_NONE)
170 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; 178 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
@@ -174,7 +182,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
174 182
175static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, 183static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
176 enum i915_cache_level level, 184 enum i915_cache_level level,
177 bool valid) 185 bool valid, u32 unused)
178{ 186{
179 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 187 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
180 pte |= HSW_PTE_ADDR_ENCODE(addr); 188 pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -187,7 +195,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
187 195
188static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, 196static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
189 enum i915_cache_level level, 197 enum i915_cache_level level,
190 bool valid) 198 bool valid, u32 unused)
191{ 199{
192 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 200 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
193 pte |= HSW_PTE_ADDR_ENCODE(addr); 201 pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -301,7 +309,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
301static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, 309static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
302 struct sg_table *pages, 310 struct sg_table *pages,
303 uint64_t start, 311 uint64_t start,
304 enum i915_cache_level cache_level) 312 enum i915_cache_level cache_level, u32 unused)
305{ 313{
306 struct i915_hw_ppgtt *ppgtt = 314 struct i915_hw_ppgtt *ppgtt =
307 container_of(vm, struct i915_hw_ppgtt, base); 315 container_of(vm, struct i915_hw_ppgtt, base);
@@ -639,7 +647,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
639 uint32_t pd_entry; 647 uint32_t pd_entry;
640 int pte, pde; 648 int pte, pde;
641 649
642 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); 650 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
643 651
644 pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + 652 pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
645 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); 653 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
@@ -941,7 +949,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
941 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 949 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
942 unsigned last_pte, i; 950 unsigned last_pte, i;
943 951
944 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); 952 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
945 953
946 while (num_entries) { 954 while (num_entries) {
947 last_pte = first_pte + num_entries; 955 last_pte = first_pte + num_entries;
@@ -964,7 +972,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
964static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, 972static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
965 struct sg_table *pages, 973 struct sg_table *pages,
966 uint64_t start, 974 uint64_t start,
967 enum i915_cache_level cache_level) 975 enum i915_cache_level cache_level, u32 flags)
968{ 976{
969 struct i915_hw_ppgtt *ppgtt = 977 struct i915_hw_ppgtt *ppgtt =
970 container_of(vm, struct i915_hw_ppgtt, base); 978 container_of(vm, struct i915_hw_ppgtt, base);
@@ -981,7 +989,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
981 989
982 pt_vaddr[act_pte] = 990 pt_vaddr[act_pte] =
983 vm->pte_encode(sg_page_iter_dma_address(&sg_iter), 991 vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
984 cache_level, true); 992 cache_level, true, flags);
993
985 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 994 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
986 kunmap_atomic(pt_vaddr); 995 kunmap_atomic(pt_vaddr);
987 pt_vaddr = NULL; 996 pt_vaddr = NULL;
@@ -1218,8 +1227,12 @@ ppgtt_bind_vma(struct i915_vma *vma,
1218 enum i915_cache_level cache_level, 1227 enum i915_cache_level cache_level,
1219 u32 flags) 1228 u32 flags)
1220{ 1229{
1230 /* Currently applicable only to VLV */
1231 if (vma->obj->gt_ro)
1232 flags |= PTE_READ_ONLY;
1233
1221 vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, 1234 vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
1222 cache_level); 1235 cache_level, flags);
1223} 1236}
1224 1237
1225static void ppgtt_unbind_vma(struct i915_vma *vma) 1238static void ppgtt_unbind_vma(struct i915_vma *vma)
@@ -1394,7 +1407,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
1394static void gen8_ggtt_insert_entries(struct i915_address_space *vm, 1407static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
1395 struct sg_table *st, 1408 struct sg_table *st,
1396 uint64_t start, 1409 uint64_t start,
1397 enum i915_cache_level level) 1410 enum i915_cache_level level, u32 unused)
1398{ 1411{
1399 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1412 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1400 unsigned first_entry = start >> PAGE_SHIFT; 1413 unsigned first_entry = start >> PAGE_SHIFT;
@@ -1440,7 +1453,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
1440static void gen6_ggtt_insert_entries(struct i915_address_space *vm, 1453static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
1441 struct sg_table *st, 1454 struct sg_table *st,
1442 uint64_t start, 1455 uint64_t start,
1443 enum i915_cache_level level) 1456 enum i915_cache_level level, u32 flags)
1444{ 1457{
1445 struct drm_i915_private *dev_priv = vm->dev->dev_private; 1458 struct drm_i915_private *dev_priv = vm->dev->dev_private;
1446 unsigned first_entry = start >> PAGE_SHIFT; 1459 unsigned first_entry = start >> PAGE_SHIFT;
@@ -1452,7 +1465,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
1452 1465
1453 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 1466 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
1454 addr = sg_page_iter_dma_address(&sg_iter); 1467 addr = sg_page_iter_dma_address(&sg_iter);
1455 iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]); 1468 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
1456 i++; 1469 i++;
1457 } 1470 }
1458 1471
@@ -1464,7 +1477,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
1464 */ 1477 */
1465 if (i != 0) 1478 if (i != 0)
1466 WARN_ON(readl(&gtt_entries[i-1]) != 1479 WARN_ON(readl(&gtt_entries[i-1]) !=
1467 vm->pte_encode(addr, level, true)); 1480 vm->pte_encode(addr, level, true, flags));
1468 1481
1469 /* This next bit makes the above posting read even more important. We 1482 /* This next bit makes the above posting read even more important. We
1470 * want to flush the TLBs only after we're certain all the PTE updates 1483 * want to flush the TLBs only after we're certain all the PTE updates
@@ -1518,7 +1531,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
1518 first_entry, num_entries, max_entries)) 1531 first_entry, num_entries, max_entries))
1519 num_entries = max_entries; 1532 num_entries = max_entries;
1520 1533
1521 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch); 1534 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
1522 1535
1523 for (i = 0; i < num_entries; i++) 1536 for (i = 0; i < num_entries; i++)
1524 iowrite32(scratch_pte, &gtt_base[i]); 1537 iowrite32(scratch_pte, &gtt_base[i]);
@@ -1567,6 +1580,10 @@ static void ggtt_bind_vma(struct i915_vma *vma,
1567 struct drm_i915_private *dev_priv = dev->dev_private; 1580 struct drm_i915_private *dev_priv = dev->dev_private;
1568 struct drm_i915_gem_object *obj = vma->obj; 1581 struct drm_i915_gem_object *obj = vma->obj;
1569 1582
1583 /* Currently applicable only to VLV */
1584 if (obj->gt_ro)
1585 flags |= PTE_READ_ONLY;
1586
1570 /* If there is no aliasing PPGTT, or the caller needs a global mapping, 1587 /* If there is no aliasing PPGTT, or the caller needs a global mapping,
1571 * or we have a global mapping already but the cacheability flags have 1588 * or we have a global mapping already but the cacheability flags have
1572 * changed, set the global PTEs. 1589 * changed, set the global PTEs.
@@ -1583,7 +1600,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
1583 (cache_level != obj->cache_level)) { 1600 (cache_level != obj->cache_level)) {
1584 vma->vm->insert_entries(vma->vm, obj->pages, 1601 vma->vm->insert_entries(vma->vm, obj->pages,
1585 vma->node.start, 1602 vma->node.start,
1586 cache_level); 1603 cache_level, flags);
1587 obj->has_global_gtt_mapping = 1; 1604 obj->has_global_gtt_mapping = 1;
1588 } 1605 }
1589 } 1606 }
@@ -1595,7 +1612,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
1595 appgtt->base.insert_entries(&appgtt->base, 1612 appgtt->base.insert_entries(&appgtt->base,
1596 vma->obj->pages, 1613 vma->obj->pages,
1597 vma->node.start, 1614 vma->node.start,
1598 cache_level); 1615 cache_level, flags);
1599 vma->obj->has_aliasing_ppgtt_mapping = 1; 1616 vma->obj->has_aliasing_ppgtt_mapping = 1;
1600 } 1617 }
1601} 1618}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 1b96a06be3cb..8d6f7c18c404 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -154,6 +154,7 @@ struct i915_vma {
154 void (*unbind_vma)(struct i915_vma *vma); 154 void (*unbind_vma)(struct i915_vma *vma);
155 /* Map an object into an address space with the given cache flags. */ 155 /* Map an object into an address space with the given cache flags. */
156#define GLOBAL_BIND (1<<0) 156#define GLOBAL_BIND (1<<0)
157#define PTE_READ_ONLY (1<<1)
157 void (*bind_vma)(struct i915_vma *vma, 158 void (*bind_vma)(struct i915_vma *vma,
158 enum i915_cache_level cache_level, 159 enum i915_cache_level cache_level,
159 u32 flags); 160 u32 flags);
@@ -197,7 +198,7 @@ struct i915_address_space {
197 /* FIXME: Need a more generic return type */ 198 /* FIXME: Need a more generic return type */
198 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, 199 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
199 enum i915_cache_level level, 200 enum i915_cache_level level,
200 bool valid); /* Create a valid PTE */ 201 bool valid, u32 flags); /* Create a valid PTE */
201 void (*clear_range)(struct i915_address_space *vm, 202 void (*clear_range)(struct i915_address_space *vm,
202 uint64_t start, 203 uint64_t start,
203 uint64_t length, 204 uint64_t length,
@@ -205,7 +206,7 @@ struct i915_address_space {
205 void (*insert_entries)(struct i915_address_space *vm, 206 void (*insert_entries)(struct i915_address_space *vm,
206 struct sg_table *st, 207 struct sg_table *st,
207 uint64_t start, 208 uint64_t start,
208 enum i915_cache_level cache_level); 209 enum i915_cache_level cache_level, u32 flags);
209 void (*cleanup)(struct i915_address_space *vm); 210 void (*cleanup)(struct i915_address_space *vm);
210}; 211};
211 212
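[ Ed. sketch: the i915_gem_gtt series above threads one extra u32 through
  every PTE path so that a single bit, PTE_READ_ONLY, can reach the
  Baytrail encoder (the only one that acts on it so far). End to end, under
  the signatures the header now declares: ]

	u32 flags = 0;

	/* Currently applicable only to VLV, per the bind_vma hunks */
	if (vma->obj->gt_ro)
		flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->obj->pages,
				vma->node.start, cache_level, flags);

	/* ...and inside byt_pte_encode() the bit finally matters: */
	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;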
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 34894b573064..e60be3f552a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,64 +28,13 @@
28#include "i915_drv.h" 28#include "i915_drv.h"
29#include "intel_renderstate.h" 29#include "intel_renderstate.h"
30 30
31struct i915_render_state { 31struct render_state {
32 const struct intel_renderstate_rodata *rodata;
32 struct drm_i915_gem_object *obj; 33 struct drm_i915_gem_object *obj;
33 unsigned long ggtt_offset; 34 u64 ggtt_offset;
34 u32 *batch; 35 int gen;
35 u32 size;
36 u32 len;
37}; 36};
38 37
39static struct i915_render_state *render_state_alloc(struct drm_device *dev)
40{
41 struct i915_render_state *so;
42 struct page *page;
43 int ret;
44
45 so = kzalloc(sizeof(*so), GFP_KERNEL);
46 if (!so)
47 return ERR_PTR(-ENOMEM);
48
49 so->obj = i915_gem_alloc_object(dev, 4096);
50 if (so->obj == NULL) {
51 ret = -ENOMEM;
52 goto free;
53 }
54 so->size = 4096;
55
56 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
57 if (ret)
58 goto free_gem;
59
60 BUG_ON(so->obj->pages->nents != 1);
61 page = sg_page(so->obj->pages->sgl);
62
63 so->batch = kmap(page);
64 if (!so->batch) {
65 ret = -ENOMEM;
66 goto unpin;
67 }
68
69 so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
70
71 return so;
72unpin:
73 i915_gem_object_ggtt_unpin(so->obj);
74free_gem:
75 drm_gem_object_unreference(&so->obj->base);
76free:
77 kfree(so);
78 return ERR_PTR(ret);
79}
80
81static void render_state_free(struct i915_render_state *so)
82{
83 kunmap(kmap_to_page(so->batch));
84 i915_gem_object_ggtt_unpin(so->obj);
85 drm_gem_object_unreference(&so->obj->base);
86 kfree(so);
87}
88
89static const struct intel_renderstate_rodata * 38static const struct intel_renderstate_rodata *
90render_state_get_rodata(struct drm_device *dev, const int gen) 39render_state_get_rodata(struct drm_device *dev, const int gen)
91{ 40{
@@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
101 return NULL; 50 return NULL;
102} 51}
103 52
104static int render_state_setup(const int gen, 53static int render_state_init(struct render_state *so, struct drm_device *dev)
105 const struct intel_renderstate_rodata *rodata,
106 struct i915_render_state *so)
107{ 54{
108 const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
109 u32 reloc_index = 0;
110 u32 * const d = so->batch;
111 unsigned int i = 0;
112 int ret; 55 int ret;
113 56
114 if (!rodata || rodata->batch_items * 4 > so->size) 57 so->gen = INTEL_INFO(dev)->gen;
58 so->rodata = render_state_get_rodata(dev, so->gen);
59 if (so->rodata == NULL)
60 return 0;
61
62 if (so->rodata->batch_items * 4 > 4096)
115 return -EINVAL; 63 return -EINVAL;
116 64
65 so->obj = i915_gem_alloc_object(dev, 4096);
66 if (so->obj == NULL)
67 return -ENOMEM;
68
69 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
70 if (ret)
71 goto free_gem;
72
73 so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
74 return 0;
75
76free_gem:
77 drm_gem_object_unreference(&so->obj->base);
78 return ret;
79}
80
81static int render_state_setup(struct render_state *so)
82{
83 const struct intel_renderstate_rodata *rodata = so->rodata;
84 unsigned int i = 0, reloc_index = 0;
85 struct page *page;
86 u32 *d;
87 int ret;
88
117 ret = i915_gem_object_set_to_cpu_domain(so->obj, true); 89 ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
118 if (ret) 90 if (ret)
119 return ret; 91 return ret;
120 92
93 page = sg_page(so->obj->pages->sgl);
94 d = kmap(page);
95
121 while (i < rodata->batch_items) { 96 while (i < rodata->batch_items) {
122 u32 s = rodata->batch[i]; 97 u32 s = rodata->batch[i];
123 98
124 if (reloc_index < rodata->reloc_items && 99 if (i * 4 == rodata->reloc[reloc_index]) {
125 i * 4 == rodata->reloc[reloc_index]) { 100 u64 r = s + so->ggtt_offset;
126 101 s = lower_32_bits(r);
127 s += goffset & 0xffffffff; 102 if (so->gen >= 8) {
128
129 /* We keep batch offsets max 32bit */
130 if (gen >= 8) {
131 if (i + 1 >= rodata->batch_items || 103 if (i + 1 >= rodata->batch_items ||
132 rodata->batch[i + 1] != 0) 104 rodata->batch[i + 1] != 0)
133 return -EINVAL; 105 return -EINVAL;
134 106
135 d[i] = s; 107 d[i++] = s;
136 i++; 108 s = upper_32_bits(r);
137 s = (goffset & 0xffffffff00000000ull) >> 32;
138 } 109 }
139 110
140 reloc_index++; 111 reloc_index++;
141 } 112 }
142 113
143 d[i] = s; 114 d[i++] = s;
144 i++;
145 } 115 }
116 kunmap(page);
146 117
147 ret = i915_gem_object_set_to_gtt_domain(so->obj, false); 118 ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
148 if (ret) 119 if (ret)
149 return ret; 120 return ret;
150 121
151 if (rodata->reloc_items != reloc_index) { 122 if (rodata->reloc[reloc_index] != -1) {
152 DRM_ERROR("not all relocs resolved, %d out of %d\n", 123 DRM_ERROR("only %d relocs resolved\n", reloc_index);
153 reloc_index, rodata->reloc_items);
154 return -EINVAL; 124 return -EINVAL;
155 } 125 }
156 126
157 so->len = rodata->batch_items * 4;
158
159 return 0; 127 return 0;
160} 128}
161 129
130static void render_state_fini(struct render_state *so)
131{
132 i915_gem_object_ggtt_unpin(so->obj);
133 drm_gem_object_unreference(&so->obj->base);
134}
135
162int i915_gem_render_state_init(struct intel_engine_cs *ring) 136int i915_gem_render_state_init(struct intel_engine_cs *ring)
163{ 137{
164 const int gen = INTEL_INFO(ring->dev)->gen; 138 struct render_state so;
165 struct i915_render_state *so;
166 const struct intel_renderstate_rodata *rodata;
167 int ret; 139 int ret;
168 140
169 if (WARN_ON(ring->id != RCS)) 141 if (WARN_ON(ring->id != RCS))
170 return -ENOENT; 142 return -ENOENT;
171 143
172 rodata = render_state_get_rodata(ring->dev, gen); 144 ret = render_state_init(&so, ring->dev);
173 if (rodata == NULL) 145 if (ret)
174 return 0; 146 return ret;
175 147
176 so = render_state_alloc(ring->dev); 148 if (so.rodata == NULL)
177 if (IS_ERR(so)) 149 return 0;
178 return PTR_ERR(so);
179 150
180 ret = render_state_setup(gen, rodata, so); 151 ret = render_state_setup(&so);
181 if (ret) 152 if (ret)
182 goto out; 153 goto out;
183 154
184 ret = ring->dispatch_execbuffer(ring, 155 ret = ring->dispatch_execbuffer(ring,
185 i915_gem_obj_ggtt_offset(so->obj), 156 so.ggtt_offset,
186 so->len, 157 so.rodata->batch_items * 4,
187 I915_DISPATCH_SECURE); 158 I915_DISPATCH_SECURE);
188 if (ret) 159 if (ret)
189 goto out; 160 goto out;
190 161
191 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring); 162 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
192 163
193 ret = __i915_add_request(ring, NULL, so->obj, NULL); 164 ret = __i915_add_request(ring, NULL, so.obj, NULL);
194 /* __i915_add_request moves object to inactive if it fails */ 165 /* __i915_add_request moves object to inactive if it fails */
195out: 166out:
196 render_state_free(so); 167 render_state_fini(&so);
197 return ret; 168 return ret;
198} 169}
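[ Ed. note: render_state is now a caller-owned stack struct instead of a
  kmalloc'd object, and gen8 relocations are emitted as a lo/hi dword pair.
  A condensed sketch of the new reloc path, names as in the diff, with the
  surrounding loop elided: ]

	u64 r = s + so->ggtt_offset;

	s = lower_32_bits(r);
	if (so->gen >= 8) {
		d[i++] = s;		/* low dword */
		s = upper_32_bits(r);	/* high dword emitted below */
	}
	d[i++] = s;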
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 7465ab0fd396..21c025a209c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -147,30 +147,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
147 return base; 147 return base;
148} 148}
149 149
150static int i915_setup_compression(struct drm_device *dev, int size) 150static int find_compression_threshold(struct drm_device *dev,
151 struct drm_mm_node *node,
152 int size,
153 int fb_cpp)
151{ 154{
152 struct drm_i915_private *dev_priv = dev->dev_private; 155 struct drm_i915_private *dev_priv = dev->dev_private;
153 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 156 int compression_threshold = 1;
154 int ret; 157 int ret;
155 158
156 compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL); 159 /* HACK: This code depends on what we will do in *_enable_fbc. If that
157 if (!compressed_fb) 160 * code changes, this code needs to change as well.
158 goto err_llb; 161 *
162 * The enable_fbc code will attempt to use one of our 2 compression
 163 * thresholds, therefore, in that case, we only have 1 fallback.
164 */
159 165
160 /* Try to over-allocate to reduce reallocations and fragmentation */ 166 /* Try to over-allocate to reduce reallocations and fragmentation. */
161 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, 167 ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
162 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT); 168 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
163 if (ret) 169 if (ret == 0)
164 ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, 170 return compression_threshold;
165 size >>= 1, 4096, 171
166 DRM_MM_SEARCH_DEFAULT); 172again:
167 if (ret) 173 /* HW's ability to limit the CFB is 1:4 */
174 if (compression_threshold > 4 ||
175 (fb_cpp == 2 && compression_threshold == 2))
176 return 0;
177
178 ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
179 size >>= 1, 4096,
180 DRM_MM_SEARCH_DEFAULT);
181 if (ret && INTEL_INFO(dev)->gen <= 4) {
182 return 0;
183 } else if (ret) {
184 compression_threshold <<= 1;
185 goto again;
186 } else {
187 return compression_threshold;
188 }
189}
190
191static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
192{
193 struct drm_i915_private *dev_priv = dev->dev_private;
194 struct drm_mm_node *uninitialized_var(compressed_llb);
195 int ret;
196
197 ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
198 size, fb_cpp);
199 if (!ret)
168 goto err_llb; 200 goto err_llb;
201 else if (ret > 1) {
 202 DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size buffer. Try to increase stolen memory size if available in BIOS.\n");
203
204 }
205
206 dev_priv->fbc.threshold = ret;
169 207
170 if (HAS_PCH_SPLIT(dev)) 208 if (HAS_PCH_SPLIT(dev))
171 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 209 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
172 else if (IS_GM45(dev)) { 210 else if (IS_GM45(dev)) {
173 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 211 I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
174 } else { 212 } else {
175 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); 213 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
176 if (!compressed_llb) 214 if (!compressed_llb)
@@ -184,13 +222,12 @@ static int i915_setup_compression(struct drm_device *dev, int size)
184 dev_priv->fbc.compressed_llb = compressed_llb; 222 dev_priv->fbc.compressed_llb = compressed_llb;
185 223
186 I915_WRITE(FBC_CFB_BASE, 224 I915_WRITE(FBC_CFB_BASE,
187 dev_priv->mm.stolen_base + compressed_fb->start); 225 dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
188 I915_WRITE(FBC_LL_BASE, 226 I915_WRITE(FBC_LL_BASE,
189 dev_priv->mm.stolen_base + compressed_llb->start); 227 dev_priv->mm.stolen_base + compressed_llb->start);
190 } 228 }
191 229
192 dev_priv->fbc.compressed_fb = compressed_fb; 230 dev_priv->fbc.size = size / dev_priv->fbc.threshold;
193 dev_priv->fbc.size = size;
194 231
195 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", 232 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
196 size); 233 size);
@@ -199,14 +236,13 @@ static int i915_setup_compression(struct drm_device *dev, int size)
199 236
200err_fb: 237err_fb:
201 kfree(compressed_llb); 238 kfree(compressed_llb);
202 drm_mm_remove_node(compressed_fb); 239 drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
203err_llb: 240err_llb:
204 kfree(compressed_fb);
205 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); 241 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
206 return -ENOSPC; 242 return -ENOSPC;
207} 243}
208 244
209int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) 245int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
210{ 246{
211 struct drm_i915_private *dev_priv = dev->dev_private; 247 struct drm_i915_private *dev_priv = dev->dev_private;
212 248
@@ -219,7 +255,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
219 /* Release any current block */ 255 /* Release any current block */
220 i915_gem_stolen_cleanup_compression(dev); 256 i915_gem_stolen_cleanup_compression(dev);
221 257
222 return i915_setup_compression(dev, size); 258 return i915_setup_compression(dev, size, fb_cpp);
223} 259}
224 260
225void i915_gem_stolen_cleanup_compression(struct drm_device *dev) 261void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
@@ -229,10 +265,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
229 if (dev_priv->fbc.size == 0) 265 if (dev_priv->fbc.size == 0)
230 return; 266 return;
231 267
232 if (dev_priv->fbc.compressed_fb) { 268 drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
233 drm_mm_remove_node(dev_priv->fbc.compressed_fb);
234 kfree(dev_priv->fbc.compressed_fb);
235 }
236 269
237 if (dev_priv->fbc.compressed_llb) { 270 if (dev_priv->fbc.compressed_llb) {
238 drm_mm_remove_node(dev_priv->fbc.compressed_llb); 271 drm_mm_remove_node(dev_priv->fbc.compressed_llb);
@@ -336,9 +369,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
336 kfree(obj->pages); 369 kfree(obj->pages);
337} 370}
338 371
372
373static void
374i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
375{
376 if (obj->stolen) {
377 drm_mm_remove_node(obj->stolen);
378 kfree(obj->stolen);
379 obj->stolen = NULL;
380 }
381}
339static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { 382static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
340 .get_pages = i915_gem_object_get_pages_stolen, 383 .get_pages = i915_gem_object_get_pages_stolen,
341 .put_pages = i915_gem_object_put_pages_stolen, 384 .put_pages = i915_gem_object_put_pages_stolen,
385 .release = i915_gem_object_release_stolen,
342}; 386};
343 387
344static struct drm_i915_gem_object * 388static struct drm_i915_gem_object *
@@ -496,13 +540,3 @@ err_out:
496 drm_gem_object_unreference(&obj->base); 540 drm_gem_object_unreference(&obj->base);
497 return NULL; 541 return NULL;
498} 542}
499
500void
501i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
502{
503 if (obj->stolen) {
504 drm_mm_remove_node(obj->stolen);
505 kfree(obj->stolen);
506 obj->stolen = NULL;
507 }
508}
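[ Ed. note: find_compression_threshold() above keeps halving the CFB
  allocation and doubling the compression threshold until the node fits,
  giving up beyond the hardware's 1:4 limit (and immediately on gen <= 4).
  The goto loop, rewritten here as an equivalent while for clarity -- a
  paraphrase, not the committed code: ]

	int threshold = 1;

	/* first attempt: over-allocate at 2x the requested size */
	if (drm_mm_insert_node(&dev_priv->mm.stolen, node,
			       size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT) == 0)
		return threshold;

	while (drm_mm_insert_node(&dev_priv->mm.stolen, node,
				  size >>= 1, 4096,
				  DRM_MM_SEARCH_DEFAULT) != 0) {
		if (INTEL_INFO(dev)->gen <= 4)
			return 0;
		threshold <<= 1;
		if (threshold > 4 ||
		    (fb_cpp == 2 && threshold == 2))
			return 0;	/* HW can only limit the CFB 1:4 */
	}
	return threshold;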
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 21ea92886a56..fe69fc837d9e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -40,19 +40,87 @@ struct i915_mmu_notifier {
40 struct hlist_node node; 40 struct hlist_node node;
41 struct mmu_notifier mn; 41 struct mmu_notifier mn;
42 struct rb_root objects; 42 struct rb_root objects;
43 struct list_head linear;
43 struct drm_device *dev; 44 struct drm_device *dev;
44 struct mm_struct *mm; 45 struct mm_struct *mm;
45 struct work_struct work; 46 struct work_struct work;
46 unsigned long count; 47 unsigned long count;
47 unsigned long serial; 48 unsigned long serial;
49 bool has_linear;
48}; 50};
49 51
50struct i915_mmu_object { 52struct i915_mmu_object {
51 struct i915_mmu_notifier *mmu; 53 struct i915_mmu_notifier *mmu;
52 struct interval_tree_node it; 54 struct interval_tree_node it;
55 struct list_head link;
53 struct drm_i915_gem_object *obj; 56 struct drm_i915_gem_object *obj;
57 bool is_linear;
54}; 58};
55 59
60static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
61{
62 struct drm_device *dev = obj->base.dev;
63 unsigned long end;
64
65 mutex_lock(&dev->struct_mutex);
66 /* Cancel any active worker and force us to re-evaluate gup */
67 obj->userptr.work = NULL;
68
69 if (obj->pages != NULL) {
70 struct drm_i915_private *dev_priv = to_i915(dev);
71 struct i915_vma *vma, *tmp;
72 bool was_interruptible;
73
74 was_interruptible = dev_priv->mm.interruptible;
75 dev_priv->mm.interruptible = false;
76
77 list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
78 int ret = i915_vma_unbind(vma);
79 WARN_ON(ret && ret != -EIO);
80 }
81 WARN_ON(i915_gem_object_put_pages(obj));
82
83 dev_priv->mm.interruptible = was_interruptible;
84 }
85
86 end = obj->userptr.ptr + obj->base.size;
87
88 drm_gem_object_unreference(&obj->base);
89 mutex_unlock(&dev->struct_mutex);
90
91 return end;
92}
93
94static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
95 struct mm_struct *mm,
96 unsigned long start,
97 unsigned long end)
98{
99 struct i915_mmu_object *mmu;
100 unsigned long serial;
101
102restart:
103 serial = mn->serial;
104 list_for_each_entry(mmu, &mn->linear, link) {
105 struct drm_i915_gem_object *obj;
106
107 if (mmu->it.last < start || mmu->it.start > end)
108 continue;
109
110 obj = mmu->obj;
111 drm_gem_object_reference(&obj->base);
112 spin_unlock(&mn->lock);
113
114 cancel_userptr(obj);
115
116 spin_lock(&mn->lock);
117 if (serial != mn->serial)
118 goto restart;
119 }
120
121 return NULL;
122}
123
56static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, 124static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
57 struct mm_struct *mm, 125 struct mm_struct *mm,
58 unsigned long start, 126 unsigned long start,
@@ -60,16 +128,18 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
60{ 128{
61 struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); 129 struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
62 struct interval_tree_node *it = NULL; 130 struct interval_tree_node *it = NULL;
131 unsigned long next = start;
63 unsigned long serial = 0; 132 unsigned long serial = 0;
64 133
65 end--; /* interval ranges are inclusive, but invalidate range is exclusive */ 134 end--; /* interval ranges are inclusive, but invalidate range is exclusive */
66 while (start < end) { 135 while (next < end) {
67 struct drm_i915_gem_object *obj; 136 struct drm_i915_gem_object *obj = NULL;
68 137
69 obj = NULL;
70 spin_lock(&mn->lock); 138 spin_lock(&mn->lock);
71 if (serial == mn->serial) 139 if (mn->has_linear)
72 it = interval_tree_iter_next(it, start, end); 140 it = invalidate_range__linear(mn, mm, start, end);
141 else if (serial == mn->serial)
142 it = interval_tree_iter_next(it, next, end);
73 else 143 else
74 it = interval_tree_iter_first(&mn->objects, start, end); 144 it = interval_tree_iter_first(&mn->objects, start, end);
75 if (it != NULL) { 145 if (it != NULL) {
@@ -81,31 +151,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
81 if (obj == NULL) 151 if (obj == NULL)
82 return; 152 return;
83 153
84 mutex_lock(&mn->dev->struct_mutex); 154 next = cancel_userptr(obj);
85 /* Cancel any active worker and force us to re-evaluate gup */
86 obj->userptr.work = NULL;
87
88 if (obj->pages != NULL) {
89 struct drm_i915_private *dev_priv = to_i915(mn->dev);
90 struct i915_vma *vma, *tmp;
91 bool was_interruptible;
92
93 was_interruptible = dev_priv->mm.interruptible;
94 dev_priv->mm.interruptible = false;
95
96 list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
97 int ret = i915_vma_unbind(vma);
98 WARN_ON(ret && ret != -EIO);
99 }
100 WARN_ON(i915_gem_object_put_pages(obj));
101
102 dev_priv->mm.interruptible = was_interruptible;
103 }
104
105 start = obj->userptr.ptr + obj->base.size;
106
107 drm_gem_object_unreference(&obj->base);
108 mutex_unlock(&mn->dev->struct_mutex);
109 } 155 }
110} 156}
111 157
@@ -150,7 +196,9 @@ i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
150 mmu->mm = mm; 196 mmu->mm = mm;
151 mmu->objects = RB_ROOT; 197 mmu->objects = RB_ROOT;
152 mmu->count = 0; 198 mmu->count = 0;
153 mmu->serial = 0; 199 mmu->serial = 1;
200 INIT_LIST_HEAD(&mmu->linear);
201 mmu->has_linear = false;
154 202
155 /* Protected by mmap_sem (write-lock) */ 203 /* Protected by mmap_sem (write-lock) */
156 ret = __mmu_notifier_register(&mmu->mn, mm); 204 ret = __mmu_notifier_register(&mmu->mn, mm);
@@ -197,6 +245,17 @@ static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
197 mmu->serial = 1; 245 mmu->serial = 1;
198} 246}
199 247
248static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
249{
250 struct i915_mmu_object *mn;
251
252 list_for_each_entry(mn, &mmu->linear, link)
253 if (mn->is_linear)
254 return true;
255
256 return false;
257}
258
200static void 259static void
201i915_mmu_notifier_del(struct i915_mmu_notifier *mmu, 260i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
202 struct i915_mmu_object *mn) 261 struct i915_mmu_object *mn)
@@ -204,7 +263,11 @@ i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
204 lockdep_assert_held(&mmu->dev->struct_mutex); 263 lockdep_assert_held(&mmu->dev->struct_mutex);
205 264
206 spin_lock(&mmu->lock); 265 spin_lock(&mmu->lock);
207 interval_tree_remove(&mn->it, &mmu->objects); 266 list_del(&mn->link);
267 if (mn->is_linear)
268 mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
269 else
270 interval_tree_remove(&mn->it, &mmu->objects);
208 __i915_mmu_notifier_update_serial(mmu); 271 __i915_mmu_notifier_update_serial(mmu);
209 spin_unlock(&mmu->lock); 272 spin_unlock(&mmu->lock);
210 273
@@ -230,7 +293,6 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
230 */ 293 */
231 i915_gem_retire_requests(mmu->dev); 294 i915_gem_retire_requests(mmu->dev);
232 295
233 /* Disallow overlapping userptr objects */
234 spin_lock(&mmu->lock); 296 spin_lock(&mmu->lock);
235 it = interval_tree_iter_first(&mmu->objects, 297 it = interval_tree_iter_first(&mmu->objects,
236 mn->it.start, mn->it.last); 298 mn->it.start, mn->it.last);
@@ -243,14 +305,22 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
243 * to flush their object references upon which the object will 305 * to flush their object references upon which the object will
 244 * be removed from the interval-tree, or the range is 306
245 * still in use by another client and the overlap is invalid. 307 * still in use by another client and the overlap is invalid.
308 *
309 * If we do have an overlap, we cannot use the interval tree
310 * for fast range invalidation.
246 */ 311 */
247 312
248 obj = container_of(it, struct i915_mmu_object, it)->obj; 313 obj = container_of(it, struct i915_mmu_object, it)->obj;
249 ret = obj->userptr.workers ? -EAGAIN : -EINVAL; 314 if (!obj->userptr.workers)
250 } else { 315 mmu->has_linear = mn->is_linear = true;
316 else
317 ret = -EAGAIN;
318 } else
251 interval_tree_insert(&mn->it, &mmu->objects); 319 interval_tree_insert(&mn->it, &mmu->objects);
320
321 if (ret == 0) {
322 list_add(&mn->link, &mmu->linear);
252 __i915_mmu_notifier_update_serial(mmu); 323 __i915_mmu_notifier_update_serial(mmu);
253 ret = 0;
254 } 324 }
255 spin_unlock(&mmu->lock); 325 spin_unlock(&mmu->lock);
256 mutex_unlock(&mmu->dev->struct_mutex); 326 mutex_unlock(&mmu->dev->struct_mutex);
@@ -611,12 +681,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
611 * We impose several restrictions upon the memory being mapped 681 * We impose several restrictions upon the memory being mapped
612 * into the GPU. 682 * into the GPU.
613 * 1. It must be page aligned (both start/end addresses, i.e ptr and size). 683 * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
614 * 2. It cannot overlap any other userptr object in the same address space. 684 * 2. It must be normal system memory, not a pointer into another map of IO
615 * 3. It must be normal system memory, not a pointer into another map of IO
616 * space (e.g. it must not be a GTT mmapping of another object). 685 * space (e.g. it must not be a GTT mmapping of another object).
617 * 4. We only allow a bo as large as we could in theory map into the GTT, 686 * 3. We only allow a bo as large as we could in theory map into the GTT,
618 * that is we limit the size to the total size of the GTT. 687 * that is we limit the size to the total size of the GTT.
619 * 5. The bo is marked as being snoopable. The backing pages are left 688 * 4. The bo is marked as being snoopable. The backing pages are left
620 * accessible directly by the CPU, but reads and writes by the GPU may 689 * accessible directly by the CPU, but reads and writes by the GPU may
621 * incur the cost of a snoop (unless you have an LLC architecture). 690 * incur the cost of a snoop (unless you have an LLC architecture).
622 * 691 *
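[ Ed. note: overlapping userptr ranges were previously rejected outright;
  the hunks above instead mark the notifier has_linear and fall back from
  the interval tree to a plain list walk on invalidation. The decision
  point, as the add path now reads (names from the diff): ]

	it = interval_tree_iter_first(&mmu->objects,
				      mn->it.start, mn->it.last);
	if (it) {
		/* overlap: the tree can no longer serve range lookups */
		if (!obj->userptr.workers)
			mmu->has_linear = mn->is_linear = true;
		else
			ret = -EAGAIN;	/* racing gup worker, retry */
	} else
		interval_tree_insert(&mn->it, &mmu->objects);

	if (ret == 0)
		list_add(&mn->link, &mmu->linear);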
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 66cf41765bf9..0b3f69439451 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
327 struct drm_device *dev = error_priv->dev; 327 struct drm_device *dev = error_priv->dev;
328 struct drm_i915_private *dev_priv = dev->dev_private; 328 struct drm_i915_private *dev_priv = dev->dev_private;
329 struct drm_i915_error_state *error = error_priv->error; 329 struct drm_i915_error_state *error = error_priv->error;
330 struct drm_i915_error_object *obj;
330 int i, j, offset, elt; 331 int i, j, offset, elt;
331 int max_hangcheck_score; 332 int max_hangcheck_score;
332 333
@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
395 error->pinned_bo_count[0]); 396 error->pinned_bo_count[0]);
396 397
397 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 398 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
398 struct drm_i915_error_object *obj;
399
400 obj = error->ring[i].batchbuffer; 399 obj = error->ring[i].batchbuffer;
401 if (obj) { 400 if (obj) {
402 err_puts(m, dev_priv->ring[i].name); 401 err_puts(m, dev_priv->ring[i].name);
@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
459 } 458 }
460 } 459 }
461 460
461 if ((obj = error->semaphore_obj)) {
462 err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
463 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
464 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
465 elt * 4,
466 obj->pages[0][elt],
467 obj->pages[0][elt+1],
468 obj->pages[0][elt+2],
469 obj->pages[0][elt+3]);
470 }
471 }
472
462 if (error->overlay) 473 if (error->overlay)
463 intel_overlay_print_error_state(m, error->overlay); 474 intel_overlay_print_error_state(m, error->overlay);
464 475
@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref)
529 kfree(error->ring[i].requests); 540 kfree(error->ring[i].requests);
530 } 541 }
531 542
543 i915_error_object_free(error->semaphore_obj);
532 kfree(error->active_bo); 544 kfree(error->active_bo);
533 kfree(error->overlay); 545 kfree(error->overlay);
534 kfree(error->display); 546 kfree(error->display);
@@ -746,7 +758,59 @@ static void i915_gem_record_fences(struct drm_device *dev,
746 } 758 }
747} 759}
748 760
761
762static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
763 struct drm_i915_error_state *error,
764 struct intel_engine_cs *ring,
765 struct drm_i915_error_ring *ering)
766{
767 struct intel_engine_cs *to;
768 int i;
769
770 if (!i915_semaphore_is_enabled(dev_priv->dev))
771 return;
772
773 if (!error->semaphore_obj)
774 error->semaphore_obj =
775 i915_error_object_create(dev_priv,
776 dev_priv->semaphore_obj,
777 &dev_priv->gtt.base);
778
779 for_each_ring(to, dev_priv, i) {
780 int idx;
781 u16 signal_offset;
782 u32 *tmp;
783
784 if (ring == to)
785 continue;
786
787 signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
788 tmp = error->semaphore_obj->pages[0];
789 idx = intel_ring_sync_index(ring, to);
790
791 ering->semaphore_mboxes[idx] = tmp[signal_offset];
792 ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
793 }
794}
795
796static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
797 struct intel_engine_cs *ring,
798 struct drm_i915_error_ring *ering)
799{
800 ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
801 ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
802 ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
803 ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
804
805 if (HAS_VEBOX(dev_priv->dev)) {
806 ering->semaphore_mboxes[2] =
807 I915_READ(RING_SYNC_2(ring->mmio_base));
808 ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
809 }
810}
811
749static void i915_record_ring_state(struct drm_device *dev, 812static void i915_record_ring_state(struct drm_device *dev,
813 struct drm_i915_error_state *error,
750 struct intel_engine_cs *ring, 814 struct intel_engine_cs *ring,
751 struct drm_i915_error_ring *ering) 815 struct drm_i915_error_ring *ering)
752{ 816{
@@ -755,18 +819,10 @@ static void i915_record_ring_state(struct drm_device *dev,
755 if (INTEL_INFO(dev)->gen >= 6) { 819 if (INTEL_INFO(dev)->gen >= 6) {
756 ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); 820 ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
757 ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); 821 ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
758 ering->semaphore_mboxes[0] 822 if (INTEL_INFO(dev)->gen >= 8)
759 = I915_READ(RING_SYNC_0(ring->mmio_base)); 823 gen8_record_semaphore_state(dev_priv, error, ring, ering);
760 ering->semaphore_mboxes[1] 824 else
761 = I915_READ(RING_SYNC_1(ring->mmio_base)); 825 gen6_record_semaphore_state(dev_priv, ring, ering);
762 ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
763 ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
764 }
765
766 if (HAS_VEBOX(dev)) {
767 ering->semaphore_mboxes[2] =
768 I915_READ(RING_SYNC_2(ring->mmio_base));
769 ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
770 } 826 }
771 827
772 if (INTEL_INFO(dev)->gen >= 4) { 828 if (INTEL_INFO(dev)->gen >= 4) {
@@ -871,6 +927,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
871 return; 927 return;
872 928
873 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 929 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
930 if (!i915_gem_obj_ggtt_bound(obj))
931 continue;
932
874 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { 933 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
875 ering->ctx = i915_error_ggtt_object_create(dev_priv, obj); 934 ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
876 break; 935 break;
@@ -895,7 +954,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
895 954
896 error->ring[i].valid = true; 955 error->ring[i].valid = true;
897 956
898 i915_record_ring_state(dev, ring, &error->ring[i]); 957 i915_record_ring_state(dev, error, ring, &error->ring[i]);
899 958
900 request = i915_gem_find_active_request(ring); 959 request = i915_gem_find_active_request(ring);
901 if (request) { 960 if (request) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c05c84f3f091..6ef9d6fabf80 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
136{ 136{
137 assert_spin_locked(&dev_priv->irq_lock); 137 assert_spin_locked(&dev_priv->irq_lock);
138 138
139 if (WARN_ON(dev_priv->pm.irqs_disabled)) 139 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
140 return; 140 return;
141 141
142 if ((dev_priv->irq_mask & mask) != 0) { 142 if ((dev_priv->irq_mask & mask) != 0) {
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
151{ 151{
152 assert_spin_locked(&dev_priv->irq_lock); 152 assert_spin_locked(&dev_priv->irq_lock);
153 153
154 if (WARN_ON(dev_priv->pm.irqs_disabled)) 154 if (!intel_irqs_enabled(dev_priv))
155 return; 155 return;
156 156
157 if ((dev_priv->irq_mask & mask) != mask) { 157 if ((dev_priv->irq_mask & mask) != mask) {
@@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
173{ 173{
174 assert_spin_locked(&dev_priv->irq_lock); 174 assert_spin_locked(&dev_priv->irq_lock);
175 175
176 if (WARN_ON(dev_priv->pm.irqs_disabled)) 176 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
177 return; 177 return;
178 178
179 dev_priv->gt_irq_mask &= ~interrupt_mask; 179 dev_priv->gt_irq_mask &= ~interrupt_mask;
@@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
182 POSTING_READ(GTIMR); 182 POSTING_READ(GTIMR);
183} 183}
184 184
185void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 185void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
186{ 186{
187 ilk_update_gt_irq(dev_priv, mask, mask); 187 ilk_update_gt_irq(dev_priv, mask, mask);
188} 188}
189 189
190void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 190void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
191{ 191{
192 ilk_update_gt_irq(dev_priv, mask, 0); 192 ilk_update_gt_irq(dev_priv, mask, 0);
193} 193}
@@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
206 206
207 assert_spin_locked(&dev_priv->irq_lock); 207 assert_spin_locked(&dev_priv->irq_lock);
208 208
209 if (WARN_ON(dev_priv->pm.irqs_disabled)) 209 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
210 return; 210 return;
211 211
212 new_val = dev_priv->pm_irq_mask; 212 new_val = dev_priv->pm_irq_mask;
@@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
220 } 220 }
221} 221}
222 222
223void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 223void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
224{ 224{
225 snb_update_pm_irq(dev_priv, mask, mask); 225 snb_update_pm_irq(dev_priv, mask, mask);
226} 226}
227 227
228void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 228void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
229{ 229{
230 snb_update_pm_irq(dev_priv, mask, 0); 230 snb_update_pm_irq(dev_priv, mask, 0);
231} 231}
@@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
264 264
265 assert_spin_locked(&dev_priv->irq_lock); 265 assert_spin_locked(&dev_priv->irq_lock);
266 266
267 if (WARN_ON(dev_priv->pm.irqs_disabled)) 267 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
268 return; 268 return;
269 269
270 new_val = dev_priv->pm_irq_mask; 270 new_val = dev_priv->pm_irq_mask;
@@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
278 } 278 }
279} 279}
280 280
281void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 281void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
282{ 282{
283 bdw_update_pm_irq(dev_priv, mask, mask); 283 bdw_update_pm_irq(dev_priv, mask, mask);
284} 284}
285 285
286void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 286void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
287{ 287{
288 bdw_update_pm_irq(dev_priv, mask, 0); 288 bdw_update_pm_irq(dev_priv, mask, 0);
289} 289}
@@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
420 420
421 assert_spin_locked(&dev_priv->irq_lock); 421 assert_spin_locked(&dev_priv->irq_lock);
422 422
423 if (WARN_ON(dev_priv->pm.irqs_disabled)) 423 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
424 return; 424 return;
425 425
426 I915_WRITE(SDEIMR, sdeimr); 426 I915_WRITE(SDEIMR, sdeimr);
@@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
1090 return true; 1090 return true;
1091} 1091}
1092 1092
1093static void i915_digport_work_func(struct work_struct *work)
1094{
1095 struct drm_i915_private *dev_priv =
1096 container_of(work, struct drm_i915_private, dig_port_work);
1097 unsigned long irqflags;
1098 u32 long_port_mask, short_port_mask;
1099 struct intel_digital_port *intel_dig_port;
1100 int i, ret;
1101 u32 old_bits = 0;
1102
1103 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1104 long_port_mask = dev_priv->long_hpd_port_mask;
1105 dev_priv->long_hpd_port_mask = 0;
1106 short_port_mask = dev_priv->short_hpd_port_mask;
1107 dev_priv->short_hpd_port_mask = 0;
1108 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1109
1110 for (i = 0; i < I915_MAX_PORTS; i++) {
1111 bool valid = false;
1112 bool long_hpd = false;
1113 intel_dig_port = dev_priv->hpd_irq_port[i];
1114 if (!intel_dig_port || !intel_dig_port->hpd_pulse)
1115 continue;
1116
1117 if (long_port_mask & (1 << i)) {
1118 valid = true;
1119 long_hpd = true;
1120 } else if (short_port_mask & (1 << i))
1121 valid = true;
1122
1123 if (valid) {
1124 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
1125 if (ret == true) {
1126 /* if we get true, fall back to old-school hpd */
1127 old_bits |= (1 << intel_dig_port->base.hpd_pin);
1128 }
1129 }
1130 }
1131
1132 if (old_bits) {
1133 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1134 dev_priv->hpd_event_bits |= old_bits;
1135 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1136 schedule_work(&dev_priv->hotplug_work);
1137 }
1138}
1139
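[ Ed. The work function above uses a grab-and-clear pattern: the IRQ path ORs port bits into shared masks under the lock, and the worker snapshots and zeroes them under the same lock before acting, so pulses that arrive mid-handling are not lost. A minimal pthread sketch of that pattern, with invented names: ]

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long long long_mask, short_mask;

    static void irq_path(int port, int is_long)
    {
            pthread_mutex_lock(&irq_lock);
            if (is_long)
                    long_mask |= 1ull << port;
            else
                    short_mask |= 1ull << port;
            pthread_mutex_unlock(&irq_lock);
    }

    static void work_fn(void)
    {
            unsigned long long l, s;

            pthread_mutex_lock(&irq_lock);
            l = long_mask;  long_mask = 0;   /* snapshot + clear ... */
            s = short_mask; short_mask = 0;  /* ... so new IRQs aren't lost */
            pthread_mutex_unlock(&irq_lock);

            printf("handle long=%llx short=%llx\n", l, s);
    }

    int main(void)
    {
            irq_path(2, 1);
            irq_path(3, 0);
            work_fn();
            return 0;
    }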
1093/* 1140/*
1094 * Handle hotplug events outside the interrupt handler proper. 1141 * Handle hotplug events outside the interrupt handler proper.
1095 */ 1142 */
@@ -1109,10 +1156,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
1109 bool changed = false; 1156 bool changed = false;
1110 u32 hpd_event_bits; 1157 u32 hpd_event_bits;
1111 1158
1112 /* HPD irq before everything is fully set up. */
1113 if (!dev_priv->enable_hotplug_processing)
1114 return;
1115
1116 mutex_lock(&mode_config->mutex); 1159 mutex_lock(&mode_config->mutex);
1117 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 1160 DRM_DEBUG_KMS("running encoder hotplug functions\n");
1118 1161
@@ -1122,6 +1165,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
1122 dev_priv->hpd_event_bits = 0; 1165 dev_priv->hpd_event_bits = 0;
1123 list_for_each_entry(connector, &mode_config->connector_list, head) { 1166 list_for_each_entry(connector, &mode_config->connector_list, head) {
1124 intel_connector = to_intel_connector(connector); 1167 intel_connector = to_intel_connector(connector);
1168 if (!intel_connector->encoder)
1169 continue;
1125 intel_encoder = intel_connector->encoder; 1170 intel_encoder = intel_connector->encoder;
1126 if (intel_encoder->hpd_pin > HPD_NONE && 1171 if (intel_encoder->hpd_pin > HPD_NONE &&
1127 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 1172 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
@@ -1152,6 +1197,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
1152 1197
1153 list_for_each_entry(connector, &mode_config->connector_list, head) { 1198 list_for_each_entry(connector, &mode_config->connector_list, head) {
1154 intel_connector = to_intel_connector(connector); 1199 intel_connector = to_intel_connector(connector);
1200 if (!intel_connector->encoder)
1201 continue;
1155 intel_encoder = intel_connector->encoder; 1202 intel_encoder = intel_connector->encoder;
1156 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 1203 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
1157 if (intel_encoder->hot_plug) 1204 if (intel_encoder->hot_plug)
@@ -1218,10 +1265,138 @@ static void notify_ring(struct drm_device *dev,
1218 1265
1219 trace_i915_gem_request_complete(ring); 1266 trace_i915_gem_request_complete(ring);
1220 1267
1268 if (drm_core_check_feature(dev, DRIVER_MODESET))
1269 intel_notify_mmio_flip(ring);
1270
1221 wake_up_all(&ring->irq_queue); 1271 wake_up_all(&ring->irq_queue);
1222 i915_queue_hangcheck(dev); 1272 i915_queue_hangcheck(dev);
1223} 1273}
1224 1274
1275static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
1276 struct intel_rps_ei *rps_ei)
1277{
1278 u32 cz_ts, cz_freq_khz;
1279 u32 render_count, media_count;
1280 u32 elapsed_render, elapsed_media, elapsed_time;
1281 u32 residency = 0;
1282
1283 cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1284 cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
1285
1286 render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
1287 media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
1288
1289 if (rps_ei->cz_clock == 0) {
1290 rps_ei->cz_clock = cz_ts;
1291 rps_ei->render_c0 = render_count;
1292 rps_ei->media_c0 = media_count;
1293
1294 return dev_priv->rps.cur_freq;
1295 }
1296
1297 elapsed_time = cz_ts - rps_ei->cz_clock;
1298 rps_ei->cz_clock = cz_ts;
1299
1300 elapsed_render = render_count - rps_ei->render_c0;
1301 rps_ei->render_c0 = render_count;
1302
1303 elapsed_media = media_count - rps_ei->media_c0;
1304 rps_ei->media_c0 = media_count;
1305
1306 /* Convert all the counters into a common unit of milliseconds */
1307 elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
1308 elapsed_render /= cz_freq_khz;
1309 elapsed_media /= cz_freq_khz;
1310
1311 /*
1312 * Calculate overall C0 residency percentage
1313 * only if the elapsed time is non-zero
1314 */
1315 if (elapsed_time) {
1316 residency =
1317 ((max(elapsed_render, elapsed_media) * 100)
1318 / elapsed_time);
1319 }
1320
1321 return residency;
1322}
1323
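[ Ed. A standalone sketch of the residency arithmetic above, with invented counter deltas; VLV_CZ_CLOCK_TO_MILLI_SEC mirrors the define added later in this patch, and the memory frequency is a sample value. ]

    #include <stdio.h>
    #include <stdint.h>

    #define VLV_CZ_CLOCK_TO_MILLI_SEC 100000	/* from the defines below */

    int main(void)
    {
            uint32_t mem_freq = 1600;			  /* sample, in MHz */
            uint32_t cz_freq_khz = (mem_freq * 1000 + 2) / 4; /* DIV_ROUND_CLOSEST */
            uint32_t elapsed_time = 5000000;		  /* CZ timestamp delta */
            uint32_t elapsed_render = 16000000;		  /* render C0 count delta */
            uint32_t elapsed_media = 8000000;		  /* media C0 count delta */
            uint32_t residency = 0;

            /* same unit conversion as vlv_c0_residency() */
            elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;	/* -> 50 ms */
            elapsed_render /= cz_freq_khz;		/* -> 40 ms in C0 */
            elapsed_media /= cz_freq_khz;		/* -> 20 ms in C0 */

            if (elapsed_time)
                    residency = (elapsed_render > elapsed_media ?
                                 elapsed_render : elapsed_media) * 100 / elapsed_time;

            printf("C0 residency: %u%%\n", residency);	/* 80%: between thresholds */
            return 0;
    }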
1324/**
1325 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
1326 * busy-ness calculated from C0 counters of render & media power wells
1327 * @dev_priv: DRM device private
1328 *
1329 */
1330static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1331{
1332 u32 residency_C0_up = 0, residency_C0_down = 0;
1333 u8 new_delay, adj;
1334
1335 dev_priv->rps.ei_interrupt_count++;
1336
1337 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1338
1339
1340 if (dev_priv->rps.up_ei.cz_clock == 0) {
1341 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1342 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1343 return dev_priv->rps.cur_freq;
1344 }
1345
1346
1347 /*
1348 * To down throttle, C0 residency should be less than down threshold
1349 * for continuous EI intervals. So calculate the down EI counters
1350 * once in VLV_INT_COUNT_FOR_DOWN_EI
1351 */
1352 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1353
1354 dev_priv->rps.ei_interrupt_count = 0;
1355
1356 residency_C0_down = vlv_c0_residency(dev_priv,
1357 &dev_priv->rps.down_ei);
1358 } else {
1359 residency_C0_up = vlv_c0_residency(dev_priv,
1360 &dev_priv->rps.up_ei);
1361 }
1362
1363 new_delay = dev_priv->rps.cur_freq;
1364
1365 adj = dev_priv->rps.last_adj;
1366 /* C0 residency is greater than UP threshold. Increase Frequency */
1367 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
1368 if (adj > 0)
1369 adj *= 2;
1370 else
1371 adj = 1;
1372
1373 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1374 new_delay = dev_priv->rps.cur_freq + adj;
1375
1376 /*
1377 * For better performance, jump directly
1378 * to RPe if we're below it.
1379 */
1380 if (new_delay < dev_priv->rps.efficient_freq)
1381 new_delay = dev_priv->rps.efficient_freq;
1382
1383 } else if (!dev_priv->rps.ei_interrupt_count &&
1384 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1385 if (adj < 0)
1386 adj *= 2;
1387 else
1388 adj = -1;
1389 /*
1390 * This means C0 residency is less than the down threshold over
1391 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq.
1392 */
1393 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1394 new_delay = dev_priv->rps.cur_freq + adj;
1395 }
1396
1397 return new_delay;
1398}
1399
1225static void gen6_pm_rps_work(struct work_struct *work) 1400static void gen6_pm_rps_work(struct work_struct *work)
1226{ 1401{
1227 struct drm_i915_private *dev_priv = 1402 struct drm_i915_private *dev_priv =
@@ -1232,11 +1407,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
1232 spin_lock_irq(&dev_priv->irq_lock); 1407 spin_lock_irq(&dev_priv->irq_lock);
1233 pm_iir = dev_priv->rps.pm_iir; 1408 pm_iir = dev_priv->rps.pm_iir;
1234 dev_priv->rps.pm_iir = 0; 1409 dev_priv->rps.pm_iir = 0;
1235 if (IS_BROADWELL(dev_priv->dev)) 1410 if (INTEL_INFO(dev_priv->dev)->gen >= 8)
1236 bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1411 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1237 else { 1412 else {
1238 /* Make sure not to corrupt PMIMR state used by ringbuffer */ 1413 /* Make sure not to corrupt PMIMR state used by ringbuffer */
1239 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1414 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1240 } 1415 }
1241 spin_unlock_irq(&dev_priv->irq_lock); 1416 spin_unlock_irq(&dev_priv->irq_lock);
1242 1417
@@ -1252,8 +1427,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
1252 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1427 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1253 if (adj > 0) 1428 if (adj > 0)
1254 adj *= 2; 1429 adj *= 2;
1255 else
1256 adj = 1;
1430 else {
1431 /* CHV needs even encode values */
1432 adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
1433 }
1257 new_delay = dev_priv->rps.cur_freq + adj; 1434 new_delay = dev_priv->rps.cur_freq + adj;
1258 1435
1259 /* 1436 /*
@@ -1268,11 +1445,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
1268 else 1445 else
1269 new_delay = dev_priv->rps.min_freq_softlimit; 1446 new_delay = dev_priv->rps.min_freq_softlimit;
1270 adj = 0; 1447 adj = 0;
1448 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1449 new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
1271 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1450 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1272 if (adj < 0) 1451 if (adj < 0)
1273 adj *= 2; 1452 adj *= 2;
1274 else
1275 adj = -1;
1453 else {
1454 /* CHV needs even encode values */
1455 adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
1456 }
1276 new_delay = dev_priv->rps.cur_freq + adj; 1457 new_delay = dev_priv->rps.cur_freq + adj;
1277 } else { /* unknown event */ 1458 } else { /* unknown event */
1278 new_delay = dev_priv->rps.cur_freq; 1459 new_delay = dev_priv->rps.cur_freq;
@@ -1372,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1372out: 1553out:
1373 WARN_ON(dev_priv->l3_parity.which_slice); 1554 WARN_ON(dev_priv->l3_parity.which_slice);
1374 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1555 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1375 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1556 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1376 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1557 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1377 1558
1378 mutex_unlock(&dev_priv->dev->struct_mutex); 1559 mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1386,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1386 return; 1567 return;
1387 1568
1388 spin_lock(&dev_priv->irq_lock); 1569 spin_lock(&dev_priv->irq_lock);
1389 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1570 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1390 spin_unlock(&dev_priv->irq_lock); 1571 spin_unlock(&dev_priv->irq_lock);
1391 1572
1392 iir &= GT_PARITY_ERROR(dev); 1573 iir &= GT_PARITY_ERROR(dev);
@@ -1441,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1441 1622
1442 spin_lock(&dev_priv->irq_lock); 1623 spin_lock(&dev_priv->irq_lock);
1443 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1624 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1444 bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1625 gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1445 spin_unlock(&dev_priv->irq_lock); 1626 spin_unlock(&dev_priv->irq_lock);
1446 1627
1447 queue_work(dev_priv->wq, &dev_priv->rps.work); 1628 queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1458,6 +1639,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1458 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1639 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1459 tmp = I915_READ(GEN8_GT_IIR(0)); 1640 tmp = I915_READ(GEN8_GT_IIR(0));
1460 if (tmp) { 1641 if (tmp) {
1642 I915_WRITE(GEN8_GT_IIR(0), tmp);
1461 ret = IRQ_HANDLED; 1643 ret = IRQ_HANDLED;
1462 rcs = tmp >> GEN8_RCS_IRQ_SHIFT; 1644 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1463 bcs = tmp >> GEN8_BCS_IRQ_SHIFT; 1645 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
@@ -1465,7 +1647,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1465 notify_ring(dev, &dev_priv->ring[RCS]); 1647 notify_ring(dev, &dev_priv->ring[RCS]);
1466 if (bcs & GT_RENDER_USER_INTERRUPT) 1648 if (bcs & GT_RENDER_USER_INTERRUPT)
1467 notify_ring(dev, &dev_priv->ring[BCS]); 1649 notify_ring(dev, &dev_priv->ring[BCS]);
1468 I915_WRITE(GEN8_GT_IIR(0), tmp);
1469 } else 1650 } else
1470 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1651 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1471 } 1652 }
@@ -1473,6 +1654,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1473 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1654 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1474 tmp = I915_READ(GEN8_GT_IIR(1)); 1655 tmp = I915_READ(GEN8_GT_IIR(1));
1475 if (tmp) { 1656 if (tmp) {
1657 I915_WRITE(GEN8_GT_IIR(1), tmp);
1476 ret = IRQ_HANDLED; 1658 ret = IRQ_HANDLED;
1477 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; 1659 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1478 if (vcs & GT_RENDER_USER_INTERRUPT) 1660 if (vcs & GT_RENDER_USER_INTERRUPT)
@@ -1480,7 +1662,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1480 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; 1662 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1481 if (vcs & GT_RENDER_USER_INTERRUPT) 1663 if (vcs & GT_RENDER_USER_INTERRUPT)
1482 notify_ring(dev, &dev_priv->ring[VCS2]); 1664 notify_ring(dev, &dev_priv->ring[VCS2]);
1483 I915_WRITE(GEN8_GT_IIR(1), tmp);
1484 } else 1665 } else
1485 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1666 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1486 } 1667 }
@@ -1488,10 +1669,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1488 if (master_ctl & GEN8_GT_PM_IRQ) { 1669 if (master_ctl & GEN8_GT_PM_IRQ) {
1489 tmp = I915_READ(GEN8_GT_IIR(2)); 1670 tmp = I915_READ(GEN8_GT_IIR(2));
1490 if (tmp & dev_priv->pm_rps_events) { 1671 if (tmp & dev_priv->pm_rps_events) {
1491 ret = IRQ_HANDLED;
1492 gen8_rps_irq_handler(dev_priv, tmp);
1493 I915_WRITE(GEN8_GT_IIR(2), 1672 I915_WRITE(GEN8_GT_IIR(2),
1494 tmp & dev_priv->pm_rps_events); 1673 tmp & dev_priv->pm_rps_events);
1674 ret = IRQ_HANDLED;
1675 gen8_rps_irq_handler(dev_priv, tmp);
1495 } else 1676 } else
1496 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1677 DRM_ERROR("The master control interrupt lied (PM)!\n");
1497 } 1678 }
@@ -1499,11 +1680,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1499 if (master_ctl & GEN8_GT_VECS_IRQ) { 1680 if (master_ctl & GEN8_GT_VECS_IRQ) {
1500 tmp = I915_READ(GEN8_GT_IIR(3)); 1681 tmp = I915_READ(GEN8_GT_IIR(3));
1501 if (tmp) { 1682 if (tmp) {
1683 I915_WRITE(GEN8_GT_IIR(3), tmp);
1502 ret = IRQ_HANDLED; 1684 ret = IRQ_HANDLED;
1503 vcs = tmp >> GEN8_VECS_IRQ_SHIFT; 1685 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1504 if (vcs & GT_RENDER_USER_INTERRUPT) 1686 if (vcs & GT_RENDER_USER_INTERRUPT)
1505 notify_ring(dev, &dev_priv->ring[VECS]); 1687 notify_ring(dev, &dev_priv->ring[VECS]);
1506 I915_WRITE(GEN8_GT_IIR(3), tmp);
1507 } else 1688 } else
1508 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1689 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1509 } 1690 }
@@ -1514,23 +1695,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1514#define HPD_STORM_DETECT_PERIOD 1000 1695#define HPD_STORM_DETECT_PERIOD 1000
1515#define HPD_STORM_THRESHOLD 5 1696#define HPD_STORM_THRESHOLD 5
1516 1697
1698static int ilk_port_to_hotplug_shift(enum port port)
1699{
1700 switch (port) {
1701 case PORT_A:
1702 case PORT_E:
1703 default:
1704 return -1;
1705 case PORT_B:
1706 return 0;
1707 case PORT_C:
1708 return 8;
1709 case PORT_D:
1710 return 16;
1711 }
1712}
1713
1714static int g4x_port_to_hotplug_shift(enum port port)
1715{
1716 switch (port) {
1717 case PORT_A:
1718 case PORT_E:
1719 default:
1720 return -1;
1721 case PORT_B:
1722 return 17;
1723 case PORT_C:
1724 return 19;
1725 case PORT_D:
1726 return 21;
1727 }
1728}
1729
1730static inline enum port get_port_from_pin(enum hpd_pin pin)
1731{
1732 switch (pin) {
1733 case HPD_PORT_B:
1734 return PORT_B;
1735 case HPD_PORT_C:
1736 return PORT_C;
1737 case HPD_PORT_D:
1738 return PORT_D;
1739 default:
1740 return PORT_A; /* no hpd */
1741 }
1742}
1743
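[ Ed. A userspace sketch of the long/short decode the handler performs with these helpers: shift the latched hotplug register down to the port's 2-bit status field and test the "long detect" bit. The field layout and the sample register value are assumptions for the illustration. ]

    #include <stdio.h>
    #include <stdint.h>

    enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

    #define HOTPLUG_LONG_DETECT 0x2	/* assumed 2-bit field: 1=short, 2=long */

    static int ilk_shift(enum port port)	/* mirrors ilk_port_to_hotplug_shift */
    {
            switch (port) {
            case PORT_B: return 0;
            case PORT_C: return 8;
            case PORT_D: return 16;
            default:     return -1;
            }
    }

    int main(void)
    {
            uint32_t dig_hotplug_reg = 0x00020001;	/* sample: D long, B short */
            enum port p;

            for (p = PORT_B; p <= PORT_D; p++) {
                    uint32_t field = (dig_hotplug_reg >> ilk_shift(p)) & 0x3;
                    if (field)
                            printf("port %d: %s pulse\n", (int)p,
                                   (field & HOTPLUG_LONG_DETECT) ? "long" : "short");
            }
            return 0;
    }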
1517static inline void intel_hpd_irq_handler(struct drm_device *dev, 1744static inline void intel_hpd_irq_handler(struct drm_device *dev,
1518 u32 hotplug_trigger, 1745 u32 hotplug_trigger,
1746 u32 dig_hotplug_reg,
1519 const u32 *hpd) 1747 const u32 *hpd)
1520{ 1748{
1521 struct drm_i915_private *dev_priv = dev->dev_private; 1749 struct drm_i915_private *dev_priv = dev->dev_private;
1522 int i; 1750 int i;
1751 enum port port;
1523 bool storm_detected = false; 1752 bool storm_detected = false;
1753 bool queue_dig = false, queue_hp = false;
1754 u32 dig_shift;
1755 u32 dig_port_mask = 0;
1524 1756
1525 if (!hotplug_trigger) 1757 if (!hotplug_trigger)
1526 return; 1758 return;
1527 1759
1528 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1760 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1529 hotplug_trigger); 1761 hotplug_trigger, dig_hotplug_reg);
1530 1762
1531 spin_lock(&dev_priv->irq_lock); 1763 spin_lock(&dev_priv->irq_lock);
1532 for (i = 1; i < HPD_NUM_PINS; i++) { 1764 for (i = 1; i < HPD_NUM_PINS; i++) {
1765 if (!(hpd[i] & hotplug_trigger))
1766 continue;
1767
1768 port = get_port_from_pin(i);
1769 if (port && dev_priv->hpd_irq_port[port]) {
1770 bool long_hpd;
1533 1771
1772 if (IS_G4X(dev)) {
1773 dig_shift = g4x_port_to_hotplug_shift(port);
1774 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1775 } else {
1776 dig_shift = ilk_port_to_hotplug_shift(port);
1777 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1778 }
1779
1780 DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
1781 /* for long HPD pulses we want to have the digital queue happen,
1782 but we still want HPD storm detection to function. */
1783 if (long_hpd) {
1784 dev_priv->long_hpd_port_mask |= (1 << port);
1785 dig_port_mask |= hpd[i];
1786 } else {
1787 /* for short HPD just trigger the digital queue */
1788 dev_priv->short_hpd_port_mask |= (1 << port);
1789 hotplug_trigger &= ~hpd[i];
1790 }
1791 queue_dig = true;
1792 }
1793 }
1794
1795 for (i = 1; i < HPD_NUM_PINS; i++) {
1534 if (hpd[i] & hotplug_trigger && 1796 if (hpd[i] & hotplug_trigger &&
1535 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { 1797 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1536 /* 1798 /*
@@ -1550,7 +1812,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1550 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1812 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1551 continue; 1813 continue;
1552 1814
1553 dev_priv->hpd_event_bits |= (1 << i); 1815 if (!(dig_port_mask & hpd[i])) {
1816 dev_priv->hpd_event_bits |= (1 << i);
1817 queue_hp = true;
1818 }
1819
1554 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1820 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1555 dev_priv->hpd_stats[i].hpd_last_jiffies 1821 dev_priv->hpd_stats[i].hpd_last_jiffies
1556 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1822 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -1579,7 +1845,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1579 * queue for otherwise the flush_work in the pageflip code will 1845 * queue for otherwise the flush_work in the pageflip code will
1580 * deadlock. 1846 * deadlock.
1581 */ 1847 */
1582 schedule_work(&dev_priv->hotplug_work); 1848 if (queue_dig)
1849 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
1850 if (queue_hp)
1851 schedule_work(&dev_priv->hotplug_work);
1583} 1852}
1584 1853
1585static void gmbus_irq_handler(struct drm_device *dev) 1854static void gmbus_irq_handler(struct drm_device *dev)
@@ -1700,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1700 if (pm_iir & dev_priv->pm_rps_events) { 1969 if (pm_iir & dev_priv->pm_rps_events) {
1701 spin_lock(&dev_priv->irq_lock); 1970 spin_lock(&dev_priv->irq_lock);
1702 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1971 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1703 snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1972 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1704 spin_unlock(&dev_priv->irq_lock); 1973 spin_unlock(&dev_priv->irq_lock);
1705 1974
1706 queue_work(dev_priv->wq, &dev_priv->rps.work); 1975 queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1809,26 +2078,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
1809 struct drm_i915_private *dev_priv = dev->dev_private; 2078 struct drm_i915_private *dev_priv = dev->dev_private;
1810 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2079 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1811 2080
1812 if (IS_G4X(dev)) {
1813 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1814
1815 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
1816 } else {
1817 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1818
1819 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1820 }
1821
1822 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1823 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1824 dp_aux_irq_handler(dev);
1825
1826 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1827 /*
1828 * Make sure hotplug status is cleared before we clear IIR, or else we
1829 * may miss hotplug events.
1830 */
1831 POSTING_READ(PORT_HOTPLUG_STAT);
2081 if (hotplug_status) {
2082 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2083 /*
2084 * Make sure hotplug status is cleared before we clear IIR, or else we
2085 * may miss hotplug events.
2086 */
2087 POSTING_READ(PORT_HOTPLUG_STAT);
2088
2089 if (IS_G4X(dev)) {
2090 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2091
2092 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
2093 } else {
2094 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2095
2096 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
2097 }
2098
2099 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
2100 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2101 dp_aux_irq_handler(dev);
2102 }
1832} 2103}
1833 2104
1834static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2105static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1839,29 +2110,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1839 irqreturn_t ret = IRQ_NONE; 2110 irqreturn_t ret = IRQ_NONE;
1840 2111
1841 while (true) { 2112 while (true) {
1842 iir = I915_READ(VLV_IIR);
1843 gt_iir = I915_READ(GTIIR);
1844 pm_iir = I915_READ(GEN6_PMIIR);
1845
1846 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1847 goto out;
1848
1849 ret = IRQ_HANDLED;
1850
1851 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1852
1853 valleyview_pipestat_irq_handler(dev, iir);
1854
1855 /* Consume port. Then clear IIR or we'll miss events */
1856 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1857 i9xx_hpd_irq_handler(dev);
1858
1859 if (pm_iir)
1860 gen6_rps_irq_handler(dev_priv, pm_iir);
1861
1862 I915_WRITE(GTIIR, gt_iir);
1863 I915_WRITE(GEN6_PMIIR, pm_iir);
1864 I915_WRITE(VLV_IIR, iir);
2113 /* Find, clear, then process each source of interrupt */
2114
2115 gt_iir = I915_READ(GTIIR);
2116 if (gt_iir)
2117 I915_WRITE(GTIIR, gt_iir);
2118
2119 pm_iir = I915_READ(GEN6_PMIIR);
2120 if (pm_iir)
2121 I915_WRITE(GEN6_PMIIR, pm_iir);
2122
2123 iir = I915_READ(VLV_IIR);
2124 if (iir) {
2125 /* Consume port before clearing IIR or we'll miss events */
2126 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2127 i9xx_hpd_irq_handler(dev);
2128 I915_WRITE(VLV_IIR, iir);
2129 }
2130
2131 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2132 goto out;
2133
2134 ret = IRQ_HANDLED;
2135
2136 if (gt_iir)
2137 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2138 if (pm_iir)
2139 gen6_rps_irq_handler(dev_priv, pm_iir);
2140 /* Call regardless, as some status bits might not be
2141 * signalled in iir */
2142 valleyview_pipestat_irq_handler(dev, iir);
1865 } 2143 }
1866 2144
1867out: 2145out:
@@ -1882,21 +2160,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1882 if (master_ctl == 0 && iir == 0) 2160 if (master_ctl == 0 && iir == 0)
1883 break; 2161 break;
1884 2162
2163 ret = IRQ_HANDLED;
2164
1885 I915_WRITE(GEN8_MASTER_IRQ, 0); 2165 I915_WRITE(GEN8_MASTER_IRQ, 0);
1886 2166
1887 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1888
1889 valleyview_pipestat_irq_handler(dev, iir);
1890
1891 /* Consume port. Then clear IIR or we'll miss events */
1892 i9xx_hpd_irq_handler(dev);
1893
1894 I915_WRITE(VLV_IIR, iir);
2167 /* Find, clear, then process each source of interrupt */
2168
2169 if (iir) {
2170 /* Consume port before clearing IIR or we'll miss events */
2171 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2172 i9xx_hpd_irq_handler(dev);
2173 I915_WRITE(VLV_IIR, iir);
2174 }
2175
2176 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2177
2178 /* Call regardless, as some status bits might not be
2179 * signalled in iir */
2180 valleyview_pipestat_irq_handler(dev, iir);
1895 2181
1896 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 2182 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1897 POSTING_READ(GEN8_MASTER_IRQ); 2183 POSTING_READ(GEN8_MASTER_IRQ);
1898
1899 ret = IRQ_HANDLED;
1900 } 2184 }
1901 2185
1902 return ret; 2186 return ret;
@@ -1907,8 +2191,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1907 struct drm_i915_private *dev_priv = dev->dev_private; 2191 struct drm_i915_private *dev_priv = dev->dev_private;
1908 int pipe; 2192 int pipe;
1909 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2193 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2194 u32 dig_hotplug_reg;
1910 2195
1911 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
2196 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2197 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2198
2199 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1912 2200
1913 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2201 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1914 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2202 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -2014,8 +2302,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2014 struct drm_i915_private *dev_priv = dev->dev_private; 2302 struct drm_i915_private *dev_priv = dev->dev_private;
2015 int pipe; 2303 int pipe;
2016 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2304 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2305 u32 dig_hotplug_reg;
2306
2307 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2308 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2017 2309
2018 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 2310 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2019 2311
2020 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2312 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2021 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2313 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2132,6 +2424,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2132 } 2424 }
2133} 2425}
2134 2426
2427/*
2428 * To handle irqs with the minimum potential races with fresh interrupts, we:
2429 * 1 - Disable Master Interrupt Control.
2430 * 2 - Find the source(s) of the interrupt.
2431 * 3 - Clear the Interrupt Identity bits (IIR).
2432 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2433 * 5 - Re-enable Master Interrupt Control.
2434 */
2135static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2435static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2136{ 2436{
2137 struct drm_device *dev = arg; 2437 struct drm_device *dev = arg;
@@ -2159,32 +2459,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2159 POSTING_READ(SDEIER); 2459 POSTING_READ(SDEIER);
2160 } 2460 }
2161 2461
2462 /* Find, clear, then process each source of interrupt */
2463
2162 gt_iir = I915_READ(GTIIR); 2464 gt_iir = I915_READ(GTIIR);
2163 if (gt_iir) { 2465 if (gt_iir) {
2466 I915_WRITE(GTIIR, gt_iir);
2467 ret = IRQ_HANDLED;
2164 if (INTEL_INFO(dev)->gen >= 6) 2468 if (INTEL_INFO(dev)->gen >= 6)
2165 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2469 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2166 else 2470 else
2167 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2471 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2168 I915_WRITE(GTIIR, gt_iir);
2169 ret = IRQ_HANDLED;
2170 } 2472 }
2171 2473
2172 de_iir = I915_READ(DEIIR); 2474 de_iir = I915_READ(DEIIR);
2173 if (de_iir) { 2475 if (de_iir) {
2476 I915_WRITE(DEIIR, de_iir);
2477 ret = IRQ_HANDLED;
2174 if (INTEL_INFO(dev)->gen >= 7) 2478 if (INTEL_INFO(dev)->gen >= 7)
2175 ivb_display_irq_handler(dev, de_iir); 2479 ivb_display_irq_handler(dev, de_iir);
2176 else 2480 else
2177 ilk_display_irq_handler(dev, de_iir); 2481 ilk_display_irq_handler(dev, de_iir);
2178 I915_WRITE(DEIIR, de_iir);
2179 ret = IRQ_HANDLED;
2180 } 2482 }
2181 2483
2182 if (INTEL_INFO(dev)->gen >= 6) { 2484 if (INTEL_INFO(dev)->gen >= 6) {
2183 u32 pm_iir = I915_READ(GEN6_PMIIR); 2485 u32 pm_iir = I915_READ(GEN6_PMIIR);
2184 if (pm_iir) { 2486 if (pm_iir) {
2185 gen6_rps_irq_handler(dev_priv, pm_iir);
2186 I915_WRITE(GEN6_PMIIR, pm_iir); 2487 I915_WRITE(GEN6_PMIIR, pm_iir);
2187 ret = IRQ_HANDLED; 2488 ret = IRQ_HANDLED;
2489 gen6_rps_irq_handler(dev_priv, pm_iir);
2188 } 2490 }
2189 } 2491 }
2190 2492
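[ Ed. A standalone sketch of the five-step sequence documented above ironlake_irq_handler (disable master, find, clear, process, re-enable), using plain variables in place of MMIO; all names and values here are invented for the illustration. ]

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t master_ctl = 1u << 31;	/* master enable + pending source */
    static uint32_t iir = 0x5;			/* fake identity bits */

    int main(void)
    {
            uint32_t snapshot;

            master_ctl &= ~(1u << 31);	/* 1: mask the master control   */
            snapshot = iir;			/* 2: find the pending sources  */
            iir = 0;			/* 3: clear IIR before handling */
            if (snapshot)			/* 4: process what we latched   */
                    printf("handling iir=0x%x\n", snapshot);
            master_ctl |= 1u << 31;		/* 5: re-enable the master bit  */
            return 0;
    }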
@@ -2215,36 +2517,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2215 I915_WRITE(GEN8_MASTER_IRQ, 0); 2517 I915_WRITE(GEN8_MASTER_IRQ, 0);
2216 POSTING_READ(GEN8_MASTER_IRQ); 2518 POSTING_READ(GEN8_MASTER_IRQ);
2217 2519
2520 /* Find, clear, then process each source of interrupt */
2521
2218 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2522 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2219 2523
2220 if (master_ctl & GEN8_DE_MISC_IRQ) { 2524 if (master_ctl & GEN8_DE_MISC_IRQ) {
2221 tmp = I915_READ(GEN8_DE_MISC_IIR); 2525 tmp = I915_READ(GEN8_DE_MISC_IIR);
2222 if (tmp & GEN8_DE_MISC_GSE)
2223 intel_opregion_asle_intr(dev);
2224 else if (tmp)
2225 DRM_ERROR("Unexpected DE Misc interrupt\n");
2226 else
2227 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2228
2229 if (tmp) { 2526 if (tmp) {
2230 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2527 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2231 ret = IRQ_HANDLED; 2528 ret = IRQ_HANDLED;
2529 if (tmp & GEN8_DE_MISC_GSE)
2530 intel_opregion_asle_intr(dev);
2531 else
2532 DRM_ERROR("Unexpected DE Misc interrupt\n");
2232 } 2533 }
2534 else
2535 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2233 } 2536 }
2234 2537
2235 if (master_ctl & GEN8_DE_PORT_IRQ) { 2538 if (master_ctl & GEN8_DE_PORT_IRQ) {
2236 tmp = I915_READ(GEN8_DE_PORT_IIR); 2539 tmp = I915_READ(GEN8_DE_PORT_IIR);
2237 if (tmp & GEN8_AUX_CHANNEL_A)
2238 dp_aux_irq_handler(dev);
2239 else if (tmp)
2240 DRM_ERROR("Unexpected DE Port interrupt\n");
2241 else
2242 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2243
2244 if (tmp) { 2540 if (tmp) {
2245 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2541 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2246 ret = IRQ_HANDLED; 2542 ret = IRQ_HANDLED;
2543 if (tmp & GEN8_AUX_CHANNEL_A)
2544 dp_aux_irq_handler(dev);
2545 else
2546 DRM_ERROR("Unexpected DE Port interrupt\n");
2247 } 2547 }
2548 else
2549 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2248 } 2550 }
2249 2551
2250 for_each_pipe(pipe) { 2552 for_each_pipe(pipe) {
@@ -2254,33 +2556,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2254 continue; 2556 continue;
2255 2557
2256 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2558 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2257 if (pipe_iir & GEN8_PIPE_VBLANK)
2258 intel_pipe_handle_vblank(dev, pipe);
2259
2260 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2261 intel_prepare_page_flip(dev, pipe);
2262 intel_finish_page_flip_plane(dev, pipe);
2263 }
2264
2265 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2266 hsw_pipe_crc_irq_handler(dev, pipe);
2267
2268 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2269 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2270 false))
2271 DRM_ERROR("Pipe %c FIFO underrun\n",
2272 pipe_name(pipe));
2273 }
2274
2275 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2276 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2277 pipe_name(pipe),
2278 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2279 }
2280
2281 if (pipe_iir) {
2282 ret = IRQ_HANDLED;
2283 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2559 if (pipe_iir) {
2560 ret = IRQ_HANDLED;
2561 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2562 if (pipe_iir & GEN8_PIPE_VBLANK)
2563 intel_pipe_handle_vblank(dev, pipe);
2564
2565 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2566 intel_prepare_page_flip(dev, pipe);
2567 intel_finish_page_flip_plane(dev, pipe);
2568 }
2569
2570 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2571 hsw_pipe_crc_irq_handler(dev, pipe);
2572
2573 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2574 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2575 false))
2576 DRM_ERROR("Pipe %c FIFO underrun\n",
2577 pipe_name(pipe));
2578 }
2579
2580 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2581 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2582 pipe_name(pipe),
2583 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2584 }
2284 } else 2585 } else
2285 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2586 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2286 } 2587 }
@@ -2292,13 +2593,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2292 * on older pch-split platforms. But this needs testing. 2593 * on older pch-split platforms. But this needs testing.
2293 */ 2594 */
2294 u32 pch_iir = I915_READ(SDEIIR); 2595 u32 pch_iir = I915_READ(SDEIIR);
2295
2296 cpt_irq_handler(dev, pch_iir);
2297
2298 if (pch_iir) { 2596 if (pch_iir) {
2299 I915_WRITE(SDEIIR, pch_iir); 2597 I915_WRITE(SDEIIR, pch_iir);
2300 ret = IRQ_HANDLED; 2598 ret = IRQ_HANDLED;
2301 }
2599 cpt_irq_handler(dev, pch_iir);
2600 } else
2601 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2602
2302 } 2603 }
2303 2604
2304 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2605 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2753,12 +3054,7 @@ static bool
2753ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 3054ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2754{ 3055{
2755 if (INTEL_INFO(dev)->gen >= 8) { 3056 if (INTEL_INFO(dev)->gen >= 8) {
2756 /* 3057 return (ipehr >> 23) == 0x1c;
2757 * FIXME: gen8 semaphore support - currently we don't emit
2758 * semaphores on bdw anyway, but this needs to be addressed when
2759 * we merge that code.
2760 */
2761 return false;
2762 } else { 3058 } else {
2763 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 3059 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2764 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 3060 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
@@ -2767,19 +3063,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2767} 3063}
2768 3064
2769static struct intel_engine_cs * 3065static struct intel_engine_cs *
2770semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) 3066semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2771{ 3067{
2772 struct drm_i915_private *dev_priv = ring->dev->dev_private; 3068 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2773 struct intel_engine_cs *signaller; 3069 struct intel_engine_cs *signaller;
2774 int i; 3070 int i;
2775 3071
2776 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 3072 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2777 /*
2778 * FIXME: gen8 semaphore support - currently we don't emit
2779 * semaphores on bdw anyway, but this needs to be addressed when
2780 * we merge that code.
2781 */
2782 return NULL;
3073 for_each_ring(signaller, dev_priv, i) {
3074 if (ring == signaller)
3075 continue;
3076
3077 if (offset == signaller->semaphore.signal_ggtt[ring->id])
3078 return signaller;
3079 }
2783 } else { 3080 } else {
2784 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 3081 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2785 3082
@@ -2792,8 +3089,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
2792 } 3089 }
2793 } 3090 }
2794 3091
2795 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n", 3092 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2796 ring->id, ipehr); 3093 ring->id, ipehr, offset);
2797 3094
2798 return NULL; 3095 return NULL;
2799} 3096}
@@ -2803,7 +3100,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2803{ 3100{
2804 struct drm_i915_private *dev_priv = ring->dev->dev_private; 3101 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2805 u32 cmd, ipehr, head; 3102 u32 cmd, ipehr, head;
2806 int i;
3103 u64 offset = 0;
3104 int i, backwards;
2807 3105
2808 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 3106 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2809 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 3107 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
@@ -2812,13 +3110,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2812 /* 3110 /*
2813 * HEAD is likely pointing to the dword after the actual command, 3111 * HEAD is likely pointing to the dword after the actual command,
2814 * so scan backwards until we find the MBOX. But limit it to just 3 3112 * so scan backwards until we find the MBOX. But limit it to just 3
2815 * dwords. Note that we don't care about ACTHD here since that might
3113 * or 4 dwords depending on the semaphore wait command size.
3114 * Note that we don't care about ACTHD here since that might
2816 * point at a batch, and semaphores are always emitted into the 3115 * point at a batch, and semaphores are always emitted into the
2817 * ringbuffer itself. 3116 * ringbuffer itself.
2818 */ 3117 */
2819 head = I915_READ_HEAD(ring) & HEAD_ADDR; 3118 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3119 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2820 3120
2821 for (i = 4; i; --i) { 3121 for (i = backwards; i; --i) {
2822 /* 3122 /*
2823 * Be paranoid and presume the hw has gone off into the wild - 3123 * Be paranoid and presume the hw has gone off into the wild -
2824 * our ring is smaller than what the hardware (and hence 3124 * our ring is smaller than what the hardware (and hence
@@ -2838,7 +3138,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2838 return NULL; 3138 return NULL;
2839 3139
2840 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 3140 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2841 return semaphore_wait_to_signaller_ring(ring, ipehr);
3141 if (INTEL_INFO(ring->dev)->gen >= 8) {
3142 offset = ioread32(ring->buffer->virtual_start + head + 12);
3143 offset <<= 32;
3144 offset |= ioread32(ring->buffer->virtual_start + head + 8);
3145 }
3146 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2842} 3147}
2843 3148
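[ Ed. The gen8 path above reassembles the semaphore's 64-bit GGTT address from two dwords in the ring: the low dword at head + 8 and the high dword at head + 12; the OR is what preserves the just-shifted high half. A standalone demo with invented values: ]

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t low = 0x00400010, high = 0x00000001;	/* invented dwords */
            uint64_t offset;

            offset = high;		/* dword at head + 12 */
            offset <<= 32;
            offset |= low;		/* dword at head + 8; a plain "=" here
                                     * would discard the high half */
            printf("semaphore ggtt offset = 0x%016llx\n",
                   (unsigned long long)offset);
            return 0;
    }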
2844static int semaphore_passed(struct intel_engine_cs *ring) 3149static int semaphore_passed(struct intel_engine_cs *ring)
@@ -3159,7 +3464,9 @@ static void gen8_irq_reset(struct drm_device *dev)
3159 gen8_gt_irq_reset(dev_priv); 3464 gen8_gt_irq_reset(dev_priv);
3160 3465
3161 for_each_pipe(pipe) 3466 for_each_pipe(pipe)
3162 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3467 if (intel_display_power_enabled(dev_priv,
3468 POWER_DOMAIN_PIPE(pipe)))
3469 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3163 3470
3164 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3471 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3165 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3472 GEN5_IRQ_RESET(GEN8_DE_MISC_);
@@ -3168,6 +3475,18 @@ static void gen8_irq_reset(struct drm_device *dev)
3168 ibx_irq_reset(dev); 3475 ibx_irq_reset(dev);
3169} 3476}
3170 3477
3478void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3479{
3480 unsigned long irqflags;
3481
3482 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3483 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3484 ~dev_priv->de_irq_mask[PIPE_B]);
3485 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3486 ~dev_priv->de_irq_mask[PIPE_C]);
3487 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3488}
3489
3171static void cherryview_irq_preinstall(struct drm_device *dev) 3490static void cherryview_irq_preinstall(struct drm_device *dev)
3172{ 3491{
3173 struct drm_i915_private *dev_priv = dev->dev_private; 3492 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3492,8 +3811,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3492 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3811 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3493 3812
3494 for_each_pipe(pipe) 3813 for_each_pipe(pipe)
3495 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
3496 de_pipe_enables);
3814 if (intel_display_power_enabled(dev_priv,
3815 POWER_DOMAIN_PIPE(pipe)))
3816 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3817 dev_priv->de_irq_mask[pipe],
3818 de_pipe_enables);
3497 3819
3498 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); 3820 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3499} 3821}
@@ -4324,12 +4646,17 @@ void intel_irq_init(struct drm_device *dev)
4324 struct drm_i915_private *dev_priv = dev->dev_private; 4646 struct drm_i915_private *dev_priv = dev->dev_private;
4325 4647
4326 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4648 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4649 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4327 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 4650 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4328 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4651 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4329 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4652 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4330 4653
4331 /* Let's track the enabled rps events */ 4654 /* Let's track the enabled rps events */
4332 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4655 if (IS_VALLEYVIEW(dev))
4656 /* WaGsvRC0ResidenncyMethod:VLV */
4657 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4658 else
4659 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4333 4660
4334 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4661 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4335 i915_hangcheck_elapsed, 4662 i915_hangcheck_elapsed,
@@ -4339,6 +4666,9 @@ void intel_irq_init(struct drm_device *dev)
4339 4666
4340 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4667 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4341 4668
4669 /* Haven't installed the IRQ handler yet */
4670 dev_priv->pm._irqs_disabled = true;
4671
4342 if (IS_GEN2(dev)) { 4672 if (IS_GEN2(dev)) {
4343 dev->max_vblank_count = 0; 4673 dev->max_vblank_count = 0;
4344 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4674 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4426,7 +4756,9 @@ void intel_hpd_init(struct drm_device *dev)
4426 list_for_each_entry(connector, &mode_config->connector_list, head) { 4756 list_for_each_entry(connector, &mode_config->connector_list, head) {
4427 struct intel_connector *intel_connector = to_intel_connector(connector); 4757 struct intel_connector *intel_connector = to_intel_connector(connector);
4428 connector->polled = intel_connector->polled; 4758 connector->polled = intel_connector->polled;
4429 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 4759 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4760 connector->polled = DRM_CONNECTOR_POLL_HPD;
4761 if (intel_connector->mst_port)
4430 connector->polled = DRM_CONNECTOR_POLL_HPD; 4762 connector->polled = DRM_CONNECTOR_POLL_HPD;
4431 } 4763 }
4432 4764
@@ -4444,7 +4776,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
4444 struct drm_i915_private *dev_priv = dev->dev_private; 4776 struct drm_i915_private *dev_priv = dev->dev_private;
4445 4777
4446 dev->driver->irq_uninstall(dev); 4778 dev->driver->irq_uninstall(dev);
4447 dev_priv->pm.irqs_disabled = true; 4779 dev_priv->pm._irqs_disabled = true;
4448} 4780}
4449 4781
4450/* Restore interrupts so we can recover from runtime PM. */ 4782/* Restore interrupts so we can recover from runtime PM. */
@@ -4452,7 +4784,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
4452{ 4784{
4453 struct drm_i915_private *dev_priv = dev->dev_private; 4785 struct drm_i915_private *dev_priv = dev->dev_private;
4454 4786
4455 dev_priv->pm.irqs_disabled = false; 4787 dev_priv->pm._irqs_disabled = false;
4456 dev->driver->irq_preinstall(dev); 4788 dev->driver->irq_preinstall(dev);
4457 dev->driver->irq_postinstall(dev); 4789 dev->driver->irq_postinstall(dev);
4458} 4790}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d05a2afa17dc..62ee8308d682 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -37,7 +37,7 @@ struct i915_params i915 __read_mostly = {
37 .enable_fbc = -1, 37 .enable_fbc = -1,
38 .enable_hangcheck = true, 38 .enable_hangcheck = true,
39 .enable_ppgtt = -1, 39 .enable_ppgtt = -1,
40 .enable_psr = 0, 40 .enable_psr = 1,
41 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), 41 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
42 .disable_power_well = 1, 42 .disable_power_well = 1,
43 .enable_ips = 1, 43 .enable_ips = 1,
@@ -48,6 +48,8 @@ struct i915_params i915 __read_mostly = {
48 .disable_display = 0, 48 .disable_display = 0,
49 .enable_cmd_parser = 1, 49 .enable_cmd_parser = 1,
50 .disable_vtd_wa = 0, 50 .disable_vtd_wa = 0,
51 .use_mmio_flip = 0,
52 .mmio_debug = 0,
51}; 53};
52 54
53module_param_named(modeset, i915.modeset, int, 0400); 55module_param_named(modeset, i915.modeset, int, 0400);
@@ -117,7 +119,7 @@ MODULE_PARM_DESC(enable_ppgtt,
117 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); 119 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
118 120
119module_param_named(enable_psr, i915.enable_psr, int, 0600); 121module_param_named(enable_psr, i915.enable_psr, int, 0600);
120MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); 122MODULE_PARM_DESC(enable_psr, "Enable PSR (default: true)");
121 123
122module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); 124module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
123MODULE_PARM_DESC(preliminary_hw_support, 125MODULE_PARM_DESC(preliminary_hw_support,
@@ -156,3 +158,12 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"
156module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); 158module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
157MODULE_PARM_DESC(enable_cmd_parser, 159MODULE_PARM_DESC(enable_cmd_parser,
158 "Enable command parsing (1=enabled [default], 0=disabled)"); 160 "Enable command parsing (1=enabled [default], 0=disabled)");
161
162module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
163MODULE_PARM_DESC(use_mmio_flip,
164 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
165
166module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
167MODULE_PARM_DESC(mmio_debug,
168 "Enable the MMIO debug code (default: false). This may negatively "
169 "affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a5bab61bfc00..fe5c27630e95 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,8 +29,8 @@
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
30 30
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
32#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
33#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
32#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
33 (pipe) == PIPE_B ? (b) : (c))
34 34
35#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) 35#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
36#define _MASKED_BIT_DISABLE(a) ((a) << 16) 36#define _MASKED_BIT_DISABLE(a) ((a) << 16)
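[ Ed. Why the parenthesized rewrite of _PIPE3 matters: the old definition used the macro argument bare in "pipe < 2", so an argument with lower precedence re-associates and silently picks the wrong register. A standalone demo of the pitfall; the register values are invented: ]

    #include <stdio.h>

    #define PIPE_A 0
    #define PIPE_B 1

    #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
    #define OLD_PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
    #define NEW_PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
                                      (pipe) == PIPE_B ? (b) : (c))

    int main(void)
    {
            int on_pipe_b = 1;

            /* a ternary as the pipe argument: the old macro's bare
             * "pipe < 2" re-associates around it and yields 1, not the
             * pipe-B register value */
            printf("old: 0x%x\n", OLD_PIPE3(on_pipe_b ? 1 : 2, 0x100, 0x200, 0x300));
            printf("new: 0x%x\n", NEW_PIPE3(on_pipe_b ? 1 : 2, 0x100, 0x200, 0x300));
            return 0;
    }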
@@ -240,7 +240,7 @@
240#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) 240#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
241#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) 241#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
242#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) 242#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
243#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ 243#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
244#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) 244#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
245#define MI_SEMAPHORE_UPDATE (1<<21) 245#define MI_SEMAPHORE_UPDATE (1<<21)
246#define MI_SEMAPHORE_COMPARE (1<<20) 246#define MI_SEMAPHORE_COMPARE (1<<20)
@@ -266,6 +266,11 @@
266#define MI_RESTORE_EXT_STATE_EN (1<<2) 266#define MI_RESTORE_EXT_STATE_EN (1<<2)
267#define MI_FORCE_RESTORE (1<<1) 267#define MI_FORCE_RESTORE (1<<1)
268#define MI_RESTORE_INHIBIT (1<<0) 268#define MI_RESTORE_INHIBIT (1<<0)
269#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
270#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
271#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
272#define MI_SEMAPHORE_POLL (1<<15)
273#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
269#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 274#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
270#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 275#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
271#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 276#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -360,6 +365,7 @@
360#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ 365#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
361#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) 366#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
362#define PIPE_CONTROL_NOTIFY (1<<8) 367#define PIPE_CONTROL_NOTIFY (1<<8)
368#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
363#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) 369#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
364#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) 370#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
365#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) 371#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
@@ -525,10 +531,21 @@ enum punit_power_well {
 #define PUNIT_REG_GPU_FREQ_STS 0xd8
 #define GENFREQSTATUS (1<<0)
 #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
+#define PUNIT_REG_CZ_TIMESTAMP 0xce
 
 #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
 #define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
 
+#define PUNIT_GPU_STATUS_REG 0xdb
+#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
+#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
+#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff
+
+#define PUNIT_GPU_DUTYCYCLE_REG 0xdf
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff
+
 #define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
 #define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
 #define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
@@ -540,6 +557,11 @@ enum punit_power_well {
 #define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
 #define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
 
+#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
+#define VLV_RP_UP_EI_THRESHOLD 90
+#define VLV_RP_DOWN_EI_THRESHOLD 70
+#define VLV_INT_COUNT_FOR_DOWN_EI 5
+
 /* vlv2 north clock has */
 #define CCK_FUSE_REG 0x8
 #define CCK_FUSE_HPLL_FREQ_MASK 0x3
@@ -574,6 +596,11 @@ enum punit_power_well {
 #define DSI_PLL_M1_DIV_SHIFT 0
 #define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
 #define CCK_DISPLAY_CLOCK_CONTROL 0x6b
+#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
+#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
+#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
+#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
+#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
 
 /**
  * DOC: DPIO
578/** 605/**
579 * DOC: DPIO 606 * DOC: DPIO
@@ -761,6 +788,8 @@ enum punit_power_well {
 
 #define _VLV_PCS_DW8_CH0 0x8220
 #define _VLV_PCS_DW8_CH1 0x8420
+#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20)
+#define CHV_PCS_USEDCLKCHANNEL (1 << 21)
 #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
 
 #define _VLV_PCS01_DW8_CH0 0x0220
@@ -869,6 +898,16 @@ enum punit_power_well {
 #define DPIO_CHV_PROP_COEFF_SHIFT 0
 #define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
 
+#define _CHV_CMN_DW5_CH0 0x8114
+#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
+#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
+#define CHV_BUFRIGHTENA1_FORCE (3 << 20)
+#define CHV_BUFRIGHTENA1_MASK (3 << 20)
+#define CHV_BUFLEFTENA1_DISABLE (0 << 22)
+#define CHV_BUFLEFTENA1_NORMAL (1 << 22)
+#define CHV_BUFLEFTENA1_FORCE (3 << 22)
+#define CHV_BUFLEFTENA1_MASK (3 << 22)
+
 #define _CHV_CMN_DW13_CH0 0x8134
 #define _CHV_CMN_DW0_CH1 0x8080
 #define DPIO_CHV_S1_DIV_SHIFT 21
@@ -883,8 +922,21 @@ enum punit_power_well {
 #define _CHV_CMN_DW1_CH1 0x8084
 #define DPIO_AFC_RECAL (1 << 14)
 #define DPIO_DCLKP_EN (1 << 13)
+#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */
 #define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
 
+#define _CHV_CMN_DW19_CH0 0x814c
+#define _CHV_CMN_DW6_CH1 0x8098
+#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
+
 #define CHV_CMN_DW30 0x8178
 #define DPIO_LRC_BYPASS (1 << 3)
 
@@ -933,6 +985,7 @@ enum punit_power_well {
 #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
 #define GEN7_FENCE_MAX_PITCH_VAL 0x0800
 
+
 /* control register for cpu gtt access */
 #define TILECTL 0x101000
 #define TILECTL_SWZCTL (1 << 0)
@@ -1170,6 +1223,8 @@ enum punit_power_well {
 #define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
 #define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
 #define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT 12
+
 #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
 #define EIR 0x020b0
 #define EMR 0x020b4
@@ -1570,11 +1625,10 @@ enum punit_power_well {
 /*
  * Clock control & power management
  */
-#define DPLL_A_OFFSET 0x6014
-#define DPLL_B_OFFSET 0x6018
-#define CHV_DPLL_C_OFFSET 0x6030
-#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
-		    dev_priv->info.display_mmio_offset)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
 
 #define VGA0 0x6000
 #define VGA1 0x6004
@@ -1662,11 +1716,10 @@ enum punit_power_well {
 #define SDVO_MULTIPLIER_SHIFT_HIRES 4
 #define SDVO_MULTIPLIER_SHIFT_VGA 0
 
-#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
-#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
-#define CHV_DPLL_C_MD_OFFSET 0x603c
-#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
-		       dev_priv->info.display_mmio_offset)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
 
 /*
  * UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -2231,7 +2284,7 @@ enum punit_power_well {
 /* Same as Haswell, but 72064 bytes now. */
 #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
 
-
+#define CHV_CLK_CTL1 0x101100
 #define VLV_CLK_CTL2 0x101104
 #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
 
@@ -2376,6 +2429,7 @@ enum punit_power_well {
 #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
 #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
 #define EDP_PSR_ENABLE (1<<31)
+#define BDW_PSR_SINGLE_FRAME (1<<30)
 #define EDP_PSR_LINK_DISABLE (0<<27)
 #define EDP_PSR_LINK_STANDBY (1<<27)
 #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
@@ -2533,8 +2587,14 @@ enum punit_power_well {
 #define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
 #define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
 #define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
 #define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
+#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
 #define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
+#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17)
 /* CRT/TV common between gen3+ */
 #define CRT_HOTPLUG_INT_STATUS (1 << 11)
 #define TV_HOTPLUG_INT_STATUS (1 << 10)
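The new *_LONG_PULSE/*_SHORT_PULSE values subdivide the existing two-bit status fields, which matters for the DP MST work in this pull: a long pulse means connect/disconnect, a short pulse means the sink is raising an IRQ. (Note the port B short define is spelled SHORT_PLUSE in the merged header.) A standalone sketch of the decode, with a made-up status word rather than a real hardware read:

	#include <stdio.h>

	#define PORTB_HOTPLUG_INT_STATUS      (3 << 17)
	#define PORTB_HOTPLUG_INT_LONG_PULSE  (2 << 17)
	#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17) /* sic, as merged */

	int main(void)
	{
		unsigned int hotplug_status = PORTB_HOTPLUG_INT_LONG_PULSE; /* example */
		unsigned int field = hotplug_status & PORTB_HOTPLUG_INT_STATUS;

		if (field == PORTB_HOTPLUG_INT_LONG_PULSE)
			printf("port B: long pulse (connect/disconnect)\n");
		else if (field == PORTB_HOTPLUG_INT_SHORT_PLUSE)
			printf("port B: short pulse (sink IRQ, e.g. DP MST)\n");
		else
			printf("port B: no hotplug event\n");
		return 0;
	}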
@@ -2588,7 +2648,7 @@ enum punit_power_well {
 
 #define PORT_DFT_I9XX 0x61150
 #define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X 0x61154
+#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
 #define DC_BALANCE_RESET_VLV (1 << 31)
 #define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
 #define PIPE_B_SCRAMBLE_RESET (1 << 1)
@@ -4630,6 +4690,8 @@ enum punit_power_well {
 #define GEN7_L3CNTLREG1 0xB01C
 #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
 #define GEN7_L3AGDIS (1<<19)
+#define GEN7_L3CNTLREG2 0xB020
+#define GEN7_L3CNTLREG3 0xB024
 
 #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
 #define GEN7_WA_L3_CHICKEN_MODE 0x20000000
@@ -4876,8 +4938,7 @@ enum punit_power_well {
 #define _PCH_TRANSA_LINK_M2 0xe0048
 #define _PCH_TRANSA_LINK_N2 0xe004c
 
-/* Per-transcoder DIP controls */
-
+/* Per-transcoder DIP controls (PCH) */
 #define _VIDEO_DIP_CTL_A 0xe0200
 #define _VIDEO_DIP_DATA_A 0xe0208
 #define _VIDEO_DIP_GCP_A 0xe0210
@@ -4890,6 +4951,7 @@ enum punit_power_well {
 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
+/* Per-transcoder DIP controls (VLV) */
 #define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
 #define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
@@ -4898,12 +4960,19 @@ enum punit_power_well {
 #define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
 
+#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
+#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
+#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
+
 #define VLV_TVIDEO_DIP_CTL(pipe) \
-	_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+	_PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
+	       VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
 #define VLV_TVIDEO_DIP_DATA(pipe) \
-	_PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+	_PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
+	       VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
 #define VLV_TVIDEO_DIP_GCP(pipe) \
-	_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+	_PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+	       VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
 
 /* Haswell DIP controls */
 #define HSW_VIDEO_DIP_CTL_A 0x60200
4908/* Haswell DIP controls */ 4977/* Haswell DIP controls */
4909#define HSW_VIDEO_DIP_CTL_A 0x60200 4978#define HSW_VIDEO_DIP_CTL_A 0x60200
@@ -5334,6 +5403,7 @@ enum punit_power_well {
 #define VLV_GTLC_ALLOWWAKEERR (1 << 1)
 #define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
 #define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
+#define VLV_GTLC_SURVIVABILITY_REG 0x130098
 #define FORCEWAKE_MT 0xa188 /* multi-threaded */
 #define FORCEWAKE_KERNEL 0x1
 #define FORCEWAKE_USER 0x2
@@ -5471,6 +5541,12 @@ enum punit_power_well {
 				 GEN6_PM_RP_DOWN_THRESHOLD | \
 				 GEN6_PM_RP_DOWN_TIMEOUT)
 
+#define CHV_CZ_CLOCK_FREQ_MODE_200 200
+#define CHV_CZ_CLOCK_FREQ_MODE_267 267
+#define CHV_CZ_CLOCK_FREQ_MODE_320 320
+#define CHV_CZ_CLOCK_FREQ_MODE_333 333
+#define CHV_CZ_CLOCK_FREQ_MODE_400 400
+
 #define GEN7_GT_SCRATCH_BASE 0x4F100
 #define GEN7_GT_SCRATCH_REG_NUM 8
 
@@ -5481,6 +5557,8 @@ enum punit_power_well {
5481#define GEN6_GT_GFX_RC6_LOCKED 0x138104 5557#define GEN6_GT_GFX_RC6_LOCKED 0x138104
5482#define VLV_COUNTER_CONTROL 0x138104 5558#define VLV_COUNTER_CONTROL 0x138104
5483#define VLV_COUNT_RANGE_HIGH (1<<15) 5559#define VLV_COUNT_RANGE_HIGH (1<<15)
5560#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
5561#define VLV_RENDER_RC0_COUNT_EN (1<<4)
5484#define VLV_MEDIA_RC6_COUNT_EN (1<<1) 5562#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
5485#define VLV_RENDER_RC6_COUNT_EN (1<<0) 5563#define VLV_RENDER_RC6_COUNT_EN (1<<0)
5486#define GEN6_GT_GFX_RC6 0x138108 5564#define GEN6_GT_GFX_RC6 0x138108
@@ -5489,6 +5567,8 @@ enum punit_power_well {
5489 5567
5490#define GEN6_GT_GFX_RC6p 0x13810C 5568#define GEN6_GT_GFX_RC6p 0x13810C
5491#define GEN6_GT_GFX_RC6pp 0x138110 5569#define GEN6_GT_GFX_RC6pp 0x138110
5570#define VLV_RENDER_C0_COUNT_REG 0x138118
5571#define VLV_MEDIA_C0_COUNT_REG 0x13811C
5492 5572
5493#define GEN6_PCODE_MAILBOX 0x138124 5573#define GEN6_PCODE_MAILBOX 0x138124
5494#define GEN6_PCODE_READY (1<<31) 5574#define GEN6_PCODE_READY (1<<31)
@@ -5723,6 +5803,7 @@ enum punit_power_well {
5723#define TRANS_DDI_FUNC_ENABLE (1<<31) 5803#define TRANS_DDI_FUNC_ENABLE (1<<31)
5724/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 5804/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
5725#define TRANS_DDI_PORT_MASK (7<<28) 5805#define TRANS_DDI_PORT_MASK (7<<28)
5806#define TRANS_DDI_PORT_SHIFT 28
5726#define TRANS_DDI_SELECT_PORT(x) ((x)<<28) 5807#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
5727#define TRANS_DDI_PORT_NONE (0<<28) 5808#define TRANS_DDI_PORT_NONE (0<<28)
5728#define TRANS_DDI_MODE_SELECT_MASK (7<<24) 5809#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
@@ -5743,6 +5824,7 @@ enum punit_power_well {
5743#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) 5824#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
5744#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) 5825#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
5745#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) 5826#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
5827#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
5746#define TRANS_DDI_BFI_ENABLE (1<<4) 5828#define TRANS_DDI_BFI_ENABLE (1<<4)
5747 5829
5748/* DisplayPort Transport Control */ 5830/* DisplayPort Transport Control */
@@ -5752,6 +5834,7 @@ enum punit_power_well {
 #define DP_TP_CTL_ENABLE (1<<31)
 #define DP_TP_CTL_MODE_SST (0<<27)
 #define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_FORCE_ACT (1<<25)
 #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
 #define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
 #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
@@ -5766,15 +5849,19 @@ enum punit_power_well {
 #define DP_TP_STATUS_A 0x64044
 #define DP_TP_STATUS_B 0x64144
 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
 #define DP_TP_STATUS_IDLE_DONE (1<<25)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_ACT_SENT (1<<24)
+#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
 
 /* DDI Buffer Control */
 #define DDI_BUF_CTL_A 0x64000
 #define DDI_BUF_CTL_B 0x64100
 #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
 #define DDI_BUF_CTL_ENABLE (1<<31)
-/* Haswell */
 #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
 #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
 #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
@@ -5784,16 +5871,6 @@ enum punit_power_well {
 #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
 #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
 #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
-/* Broadwell */
-#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
-#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
-#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
-#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
-#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
-#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
-#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
-#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
-#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
 #define DDI_BUF_EMP_MASK (0xf<<24)
 #define DDI_BUF_PORT_REVERSAL (1<<16)
 #define DDI_BUF_IS_IDLE (1<<7)
@@ -5861,10 +5938,12 @@ enum punit_power_well {
 /* WRPLL */
 #define WRPLL_CTL1 0x46040
 #define WRPLL_CTL2 0x46060
+#define WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
 #define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
-#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+#define WRPLL_PLL_SSC (1<<28)
+#define WRPLL_PLL_NON_SSC (2<<28)
+#define WRPLL_PLL_LCPLL (3<<28)
+#define WRPLL_PLL_REF_MASK (3<<28)
 /* WRPLL divider programming */
 #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
 #define WRPLL_DIVIDER_REF_MASK (0xff)
@@ -5883,6 +5962,7 @@ enum punit_power_well {
 #define PORT_CLK_SEL_LCPLL_1350 (1<<29)
 #define PORT_CLK_SEL_LCPLL_810 (2<<29)
 #define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
 #define PORT_CLK_SEL_WRPLL1 (4<<29)
 #define PORT_CLK_SEL_WRPLL2 (5<<29)
 #define PORT_CLK_SEL_NONE (7<<29)
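WRPLL_CTL(pll) and PORT_CLK_SEL_WRPLL(pll) parameterize what previously had to be open-coded per PLL. A standalone check that the parameterized forms agree with the fixed defines (the shifts are done on unsigned values here to keep the sketch well-defined C; the header's variants shift plain ints):

	#include <assert.h>

	#define WRPLL_CTL1              0x46040u
	#define WRPLL_CTL2              0x46060u
	#define WRPLL_CTL(pll)          ((pll) == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
	#define PORT_CLK_SEL_WRPLL(pll) (((unsigned)(pll) + 4) << 29)
	#define PORT_CLK_SEL_WRPLL1     (4u << 29)
	#define PORT_CLK_SEL_WRPLL2     (5u << 29)

	int main(void)
	{
		assert(WRPLL_CTL(0) == WRPLL_CTL1);
		assert(WRPLL_CTL(1) == WRPLL_CTL2);
		assert(PORT_CLK_SEL_WRPLL(0) == PORT_CLK_SEL_WRPLL1);
		assert(PORT_CLK_SEL_WRPLL(1) == PORT_CLK_SEL_WRPLL2);
		return 0;
	}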
@@ -5924,7 +6004,10 @@ enum punit_power_well {
 #define LCPLL_CD_SOURCE_FCLK (1<<21)
 #define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
 
-#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using I915_WRITE. */
+#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_BDW 0x138144
 #define D_COMP_RCOMP_IN_PROGRESS (1<<9)
 #define D_COMP_COMP_FORCE (1<<8)
 #define D_COMP_COMP_DISABLE (1<<0)
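The D_COMP split encodes that Haswell's copy sits behind the MCHBAR mirror and cannot be written with a plain MMIO store, while Broadwell exposes a directly writable register at 0x138144. The write helper referenced by the comment plausibly takes the shape below; treat the pcode mailbox constant and the locking details as assumptions rather than a quote of the tree:

	static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
	{
		struct drm_device *dev = dev_priv->dev;

		if (IS_HASWELL(dev)) {
			/* HSW: go through the punit mailbox, not I915_WRITE */
			mutex_lock(&dev_priv->rps.hw_lock);
			if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
						    val))
				DRM_ERROR("Failed to write to D_COMP\n");
			mutex_unlock(&dev_priv->rps.hw_lock);
		} else {
			I915_WRITE(D_COMP_BDW, val);
			POSTING_READ(D_COMP_BDW);
		}
	}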
@@ -6005,7 +6088,8 @@ enum punit_power_well {
 
 #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
 #define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
+					_MIPIB_PORT_CTRL)
 #define DPI_ENABLE (1 << 31) /* A + B */
 #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
 #define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
@@ -6047,18 +6131,20 @@ enum punit_power_well {
 
 #define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
 #define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
+				_MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
 #define TEARING_EFFECT_DELAY_SHIFT 0
 #define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
 
 /* XXX: all bits reserved */
 #define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
 
 /* MIPI DSI Controller and D-PHY registers */
 
-#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
-#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
-#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
+#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
+					_MIPIB_DEVICE_READY)
 #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
 #define ULPS_STATE_MASK (3 << 1)
 #define ULPS_STATE_ENTER (2 << 1)
@@ -6066,12 +6152,14 @@ enum punit_power_well {
 #define ULPS_STATE_NORMAL_OPERATION (0 << 1)
 #define DEVICE_READY (1 << 0)
 
-#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
-#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
-#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
-#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
-#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
-#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
+#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
+					_MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
+#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
+					_MIPIB_INTR_EN)
 #define TEARING_EFFECT (1 << 31)
 #define SPL_PKT_SENT_INTERRUPT (1 << 30)
 #define GEN_READ_DATA_AVAIL (1 << 29)
@@ -6105,9 +6193,10 @@ enum punit_power_well {
 #define RXSOT_SYNC_ERROR (1 << 1)
 #define RXSOT_ERROR (1 << 0)
 
-#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
+					_MIPIB_DSI_FUNC_PRG)
 #define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
 #define CMD_MODE_NOT_SUPPORTED (0 << 13)
 #define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -6128,78 +6217,94 @@ enum punit_power_well {
 #define DATA_LANES_PRG_REG_SHIFT 0
 #define DATA_LANES_PRG_REG_MASK (7 << 0)
 
-#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
+					_MIPIB_HS_TX_TIMEOUT)
 #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
 
-#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
+					_MIPIB_LP_RX_TIMEOUT)
 #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
 
-#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
+			_MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
 #define TURN_AROUND_TIMEOUT_MASK 0x3f
 
-#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
+			_MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
 #define DEVICE_RESET_TIMER_MASK 0xffff
 
-#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
-#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
-#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
+#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
+					_MIPIB_DPI_RESOLUTION)
 #define VERTICAL_ADDRESS_SHIFT 16
 #define VERTICAL_ADDRESS_MASK (0xffff << 16)
 #define HORIZONTAL_ADDRESS_SHIFT 0
 #define HORIZONTAL_ADDRESS_MASK 0xffff
 
-#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
+			_MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
 #define DBI_FIFO_EMPTY_HALF (0 << 0)
 #define DBI_FIFO_EMPTY_QUARTER (1 << 0)
 #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
 
 /* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
-
-#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
-#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
-#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
-
-#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
-#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
-#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
-
-#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
-
-#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+			_MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
+#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
+					_MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
+#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
+					_MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
+			_MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+			_MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
+#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
+					_MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
+#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
+					_MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
+		_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
 
-#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
-#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
-#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
-
-#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
-#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
-#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
-
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
 /* regs above are bits 15:0 */
 
-#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
-#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
-#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
+#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
+					_MIPIB_DPI_CONTROL)
 #define DPI_LP_MODE (1 << 6)
 #define BACKLIGHT_OFF (1 << 5)
 #define BACKLIGHT_ON (1 << 4)
@@ -6208,27 +6313,31 @@ enum punit_power_well {
 #define TURN_ON (1 << 1)
 #define SHUTDOWN (1 << 0)
 
-#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
-#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
-#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
+#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
+					_MIPIB_DPI_DATA)
 #define COMMAND_BYTE_SHIFT 0
 #define COMMAND_BYTE_MASK (0x3f << 0)
 
-#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
-#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
-#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
+#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
+					_MIPIB_INIT_COUNT)
 #define MASTER_INIT_TIMER_SHIFT 0
 #define MASTER_INIT_TIMER_MASK (0xffff << 0)
 
-#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
+			_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
 #define MAX_RETURN_PKT_SIZE_SHIFT 0
 #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
 
-#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
+			_MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
 #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
 #define DISABLE_VIDEO_BTA (1 << 3)
 #define IP_TG_CONFIG (1 << 2)
@@ -6236,9 +6345,10 @@ enum punit_power_well {
 #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
 #define VIDEO_MODE_BURST (3 << 0)
 
-#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
-#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
-#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
+#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
+					_MIPIB_EOT_DISABLE)
 #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
 #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
 #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -6248,28 +6358,33 @@ enum punit_power_well {
 #define CLOCKSTOP (1 << 1)
 #define EOT_DISABLE (1 << 0)
 
-#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
-#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
-#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
+#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
+					_MIPIB_LP_BYTECLK)
 #define LP_BYTECLK_SHIFT 0
 #define LP_BYTECLK_MASK (0xffff << 0)
 
 /* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
-#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
-#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
+#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
+					_MIPIB_LP_GEN_DATA)
 
 /* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
-#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
-#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
-#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
-#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
-#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
+#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
+					_MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
+					_MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
+#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
+					_MIPIB_HS_GEN_CTRL)
 #define LONG_PACKET_WORD_COUNT_SHIFT 8
 #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
 #define SHORT_PACKET_PARAM_SHIFT 8
@@ -6280,9 +6395,10 @@ enum punit_power_well {
 #define DATA_TYPE_MASK (3f << 0)
 /* data type values, see include/video/mipi_display.h */
 
-#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
-#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
+					_MIPIB_GEN_FIFO_STAT)
 #define DPI_FIFO_EMPTY (1 << 28)
 #define DBI_FIFO_EMPTY (1 << 27)
 #define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -6298,16 +6414,18 @@ enum punit_power_well {
 #define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
 #define HS_DATA_FIFO_FULL (1 << 0)
 
-#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
+			_MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
 #define DBI_HS_LP_MODE_MASK (1 << 0)
 #define DBI_LP_MODE (1 << 0)
 #define DBI_HS_MODE (0 << 0)
 
-#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
-#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
-#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
+#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
+					_MIPIB_DPHY_PARAM)
 #define EXIT_ZERO_COUNT_SHIFT 24
 #define EXIT_ZERO_COUNT_MASK (0x3f << 24)
 #define TRAIL_COUNT_SHIFT 16
@@ -6318,34 +6436,41 @@ enum punit_power_well {
 #define PREPARE_COUNT_MASK (0x3f << 0)
 
 /* bits 31:0 */
-#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
-#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
-#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
-
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
+#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
+					_MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+					+ 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+					+ 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
+	_MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
 #define LP_HS_SSW_CNT_SHIFT 16
 #define LP_HS_SSW_CNT_MASK (0xffff << 16)
 #define HS_LP_PWR_SW_CNT_SHIFT 0
 #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
 
-#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
-#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
+			_MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
 #define STOP_STATE_STALL_COUNTER_SHIFT 0
 #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
 
-#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
-#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
-#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
-#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
+				_MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
+#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
+					_MIPIB_INTR_EN_REG_1)
 #define RX_CONTENTION_DETECTED (1 << 0)
 
 /* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
 #define DBI_TYPEC_ENABLE (1 << 31)
 #define DBI_TYPEC_WIP (1 << 30)
 #define DBI_TYPEC_OPTION_SHIFT 28
@@ -6359,9 +6484,10 @@ enum punit_power_well {
 
 /* MIPI adapter registers */
 
-#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
-#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
-#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
+#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
+					_MIPIB_CTRL)
 #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
 #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
 #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -6373,50 +6499,52 @@ enum punit_power_well {
 #define READ_REQUEST_PRIORITY_HIGH (3 << 3)
 #define RGB_FLIP_TO_BGR (1 << 2)
 
-#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
-#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
-#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
+#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
+					_MIPIB_DATA_ADDRESS)
 #define DATA_MEM_ADDRESS_SHIFT 5
 #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
 #define DATA_VALID (1 << 0)
 
-#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
-#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
-#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
+#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
+					_MIPIB_DATA_LENGTH)
 #define DATA_LENGTH_SHIFT 0
 #define DATA_LENGTH_MASK (0xfffff << 0)
 
-#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
-#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
+				_MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
 #define COMMAND_MEM_ADDRESS_SHIFT 5
 #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
 #define AUTO_PWG_ENABLE (1 << 2)
 #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
 #define COMMAND_VALID (1 << 0)
 
-#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
-#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
-#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
+#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
+					_MIPIB_COMMAND_LENGTH)
 #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
 #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
 
-#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
-#define MIPI_READ_DATA_RETURN(pipe, n) \
-	(_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(tc, n) \
+	(_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+	+ 4 * (n)) /* n: 0...7 */
 
-#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
-#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
-#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
+#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
+				_MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
 #define READ_DATA_VALID(n) (1 << (n))
 
 /* For UMS only (deprecated): */
 #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
 #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
 
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 86ce39aad0ff..ae7fd8fc27f0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -47,22 +47,45 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
 
 	intel_runtime_pm_get(dev_priv);
 
-	/* On VLV, residency time is in CZ units rather than 1.28us */
+	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
 	if (IS_VALLEYVIEW(dev)) {
-		u32 clkctl2;
+		u32 reg, czcount_30ns;
 
-		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
-			CLK_CTL2_CZCOUNT_30NS_SHIFT;
-		if (!clkctl2) {
-			WARN(!clkctl2, "bogus CZ count value");
+		if (IS_CHERRYVIEW(dev))
+			reg = CHV_CLK_CTL1;
+		else
+			reg = VLV_CLK_CTL2;
+
+		czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+
+		if (!czcount_30ns) {
+			WARN(!czcount_30ns, "bogus CZ count value");
 			ret = 0;
 			goto out;
 		}
-		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+
+		units = 0;
+		div = 1000000ULL;
+
+		if (IS_CHERRYVIEW(dev)) {
+			/* Special case for 320Mhz */
+			if (czcount_30ns == 1) {
+				div = 10000000ULL;
+				units = 3125ULL;
+			} else {
+				/* chv counts are one less */
+				czcount_30ns += 1;
+			}
+		}
+
+		if (units == 0)
+			units = DIV_ROUND_UP_ULL(30ULL * bias,
+						 (u64)czcount_30ns);
+
 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 			units <<= 8;
 
-		div = 1000000ULL * bias;
+		div = div * bias;
 	}
 
 	raw_time = I915_READ(reg) * units;
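The reworked CZ-unit conversion is easy to sanity-check outside the kernel: czcount_30ns is the number of CZ clock ticks per 30 ns, so each tick is 30/czcount_30ns ns; a 320 MHz CZ clock (3.125 ns) does not divide 30 ns evenly, hence the CHV special case with units = 3125 against a 10^7 divisor, and CHV additionally reports one less than the true count. A standalone worked example (the bias of 100 and the millisecond scaling are assumptions from the surrounding function, which is not fully shown, and the VLV_COUNT_RANGE_HIGH <<= 8 step is omitted):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_UP_ULL(x, y) (((x) + (y) - 1) / (y))

	static uint64_t residency_ms(uint64_t count, uint32_t czcount_30ns,
				     int is_chv)
	{
		uint64_t bias = 100, units = 0, div = 1000000ULL;

		if (is_chv) {
			if (czcount_30ns == 1) {	/* 320 MHz: 3.125 ns period */
				div = 10000000ULL;
				units = 3125ULL;
			} else {
				czcount_30ns += 1;	/* chv counts are one less */
			}
		}
		if (units == 0)
			units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);

		return DIV_ROUND_UP_ULL(count * units, div * bias);
	}

	int main(void)
	{
		/* VLV, one tick per 30 ns: 1e9 ticks comes out as 30000 ms. */
		printf("%" PRIu64 " ms\n", residency_ms(1000000000ULL, 1, 0));
		/* CHV at 320 MHz: the same count is only 3125 ms. */
		printf("%" PRIu64 " ms\n", residency_ms(1000000000ULL, 1, 1));
		return 0;
	}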
@@ -461,11 +484,20 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 	mutex_unlock(&dev->struct_mutex);
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz) {
-		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+		if (IS_VALLEYVIEW(dev))
+			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+		else
+			val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
 	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
-		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+		if (IS_VALLEYVIEW(dev))
+			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+		else
+			val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
 	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
-		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+		if (IS_VALLEYVIEW(dev))
+			val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+		else
+			val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
 	} else {
 		BUG();
 	}
@@ -486,6 +518,9 @@ static const struct attribute *vlv_attrs[] = {
 	&dev_attr_gt_cur_freq_mhz.attr,
 	&dev_attr_gt_max_freq_mhz.attr,
 	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_gt_RP0_freq_mhz.attr,
+	&dev_attr_gt_RP1_freq_mhz.attr,
+	&dev_attr_gt_RPn_freq_mhz.attr,
 	&dev_attr_vlv_rpe_freq_mhz.attr,
 	NULL,
 };
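[Note: in the calc_residency() change above, czcount_30ns encodes the CZ clock rate as ticks per 30 ns; on CHV the register stores one less than the actual count, and the value 1 flags a 320 MHz clock handled as 3125 units against a 10^7 divider. A self-contained C sketch of the resulting conversion follows, assuming the surrounding function finishes by dividing raw counter ticks times units by div; the function itself is illustrative, not the kernel's.]

#include <stdint.h>
#include <stdbool.h>

static uint64_t residency_usecs(uint64_t raw_ticks, uint32_t czcount_30ns,
				bool is_chv, bool range_high, uint64_t bias)
{
	uint64_t units, div = 1000000ULL;

	if (is_chv && czcount_30ns == 1) {
		/* 320 MHz special case: 3.125 ns per tick */
		units = 3125ULL;
		div = 10000000ULL;
	} else {
		if (is_chv)
			czcount_30ns += 1;	/* chv counts are one less */
		/* 30 ns per czcount_30ns ticks, scaled by bias, rounded up */
		units = (30ULL * bias + czcount_30ns - 1) / czcount_30ns;
	}

	if (range_high)			/* counter runs in a <<8 coarser range */
		units <<= 8;

	return raw_ticks * units / (div * bias);
}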
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 827498e081df..608ed302f24d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -336,11 +336,12 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
 	dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
 	dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+	dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
 	DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
 		      "active %s, min brightness %u, level %u\n",
 		      dev_priv->vbt.backlight.pwm_freq_hz,
 		      dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
-		      entry->min_brightness,
+		      dev_priv->vbt.backlight.min_brightness,
 		      backlight_data->level[panel_type]);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 5a045d3bd77e..2efaf8e8d9c4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 	pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
+static void hsw_crt_pre_enable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
+	I915_WRITE(SPLL_CTL,
+		   SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
+	POSTING_READ(SPLL_CTL);
+	udelay(20);
+}
+
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder)
 	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
 }
 
+
+static void hsw_crt_post_disable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	DRM_DEBUG_KMS("Disabling SPLL\n");
+	val = I915_READ(SPLL_CTL);
+	WARN_ON(!(val & SPLL_PLL_ENABLE));
+	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	POSTING_READ(SPLL_CTL);
+}
+
 static void intel_enable_crt(struct intel_encoder *encoder)
 {
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	pipe_config->pipe_bpp = 24;
 
 	/* FDI must always be 2.7 GHz */
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev)) {
+		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
 		pipe_config->port_clock = 135000 * 2;
+	}
 
 	return true;
 }
@@ -632,8 +660,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 	struct intel_load_detect_pipe tmp;
 	struct drm_modeset_acquire_ctx ctx;
 
-	intel_runtime_pm_get(dev_priv);
-
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
 		      connector->base.id, connector->name,
 		      force);
@@ -685,8 +711,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 
 out:
 	intel_display_power_put(dev_priv, power_domain);
-	intel_runtime_pm_put(dev_priv);
-
 	return status;
 }
 
@@ -860,6 +884,8 @@ void intel_crt_init(struct drm_device *dev)
 	if (HAS_DDI(dev)) {
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
+		crt->base.pre_enable = hsw_crt_pre_enable;
+		crt->base.post_disable = hsw_crt_post_disable;
 	} else {
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
@@ -869,7 +895,7 @@ void intel_crt_init(struct drm_device *dev)
 
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	if (!I915_HAS_HOTPLUG(dev))
 		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
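[Note: the new hsw_crt_pre_enable()/hsw_crt_post_disable() hooks above bracket the pipe with the SPLL's lifetime: the PLL is spun up before the pipe comes up and torn down only after it is off. A simplified C sketch of that hook ordering; the structs here are illustrative stand-ins for the driver's intel_encoder callbacks, not its real types.]

struct encoder;

struct encoder_funcs {
	void (*pre_enable)(struct encoder *enc);	/* clock/PLL on */
	void (*enable)(struct encoder *enc);		/* port on */
	void (*disable)(struct encoder *enc);		/* port off */
	void (*post_disable)(struct encoder *enc);	/* clock/PLL off */
};

static void modeset_enable(struct encoder *enc, const struct encoder_funcs *f)
{
	if (f->pre_enable)
		f->pre_enable(enc);	/* e.g. hsw_crt_pre_enable: SPLL on */
	f->enable(enc);
}

static void modeset_disable(struct encoder *enc, const struct encoder_funcs *f)
{
	f->disable(enc);
	if (f->post_disable)
		f->post_disable(enc);	/* e.g. hsw_crt_post_disable: SPLL off */
}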
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b17b9c7c769f..5db0b5552e39 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = {
 	0x00FFFFFF, 0x00000012,		/* eDP parameters */
 	0x00EBAFFF, 0x00020011,
 	0x00C71FFF, 0x0006000F,
+	0x00AAAFFF, 0x000E000A,
 	0x00FFFFFF, 0x00020011,
 	0x00DB6FFF, 0x0005000F,
 	0x00BEEFFF, 0x000A000C,
 	0x00FFFFFF, 0x0005000F,
 	0x00DB6FFF, 0x000A000C,
-	0x00FFFFFF, 0x000A000C,
 	0x00FFFFFF, 0x00140006		/* HDMI parameters 800mV 0dB*/
 };
 
@@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = {
 	0x00FFFFFF, 0x0007000E,		/* DP parameters */
 	0x00D75FFF, 0x000E000A,
 	0x00BEFFFF, 0x00140006,
+	0x80B2CFFF, 0x001B0002,
 	0x00FFFFFF, 0x000E000A,
 	0x00D75FFF, 0x00180004,
 	0x80CB2FFF, 0x001B0002,
 	0x00F7DFFF, 0x00180004,
 	0x80D75FFF, 0x001B0002,
-	0x80FFFFFF, 0x001B0002,
 	0x00FFFFFF, 0x00140006		/* HDMI parameters 800mV 0dB*/
 };
 
@@ -116,7 +116,10 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 	struct drm_encoder *encoder = &intel_encoder->base;
 	int type = intel_encoder->type;
 
-	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+	if (type == INTEL_OUTPUT_DP_MST) {
+		struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary;
+		return intel_dig_port->port;
+	} else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
 	    type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
 		struct intel_digital_port *intel_dig_port =
 			enc_to_dig_port(encoder);
@@ -277,7 +280,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
 
 	/* Configure Port Clock Select */
-	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
+	WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
 
 	/* Start the training iterating through available voltages and emphasis,
 	 * testing each value twice. */
@@ -364,6 +368,18 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 		DRM_ERROR("FDI link training failed!\n");
 }
 
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *intel_dig_port =
+		enc_to_dig_port(&encoder->base);
+
+	intel_dp->DP = intel_dig_port->saved_port_bits |
+		DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+	intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+
+}
+
 static struct intel_encoder *
 intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
 {
@@ -385,53 +401,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
 	return ret;
 }
 
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t val;
-
-	switch (intel_crtc->ddi_pll_sel) {
-	case PORT_CLK_SEL_SPLL:
-		plls->spll_refcount--;
-		if (plls->spll_refcount == 0) {
-			DRM_DEBUG_KMS("Disabling SPLL\n");
-			val = I915_READ(SPLL_CTL);
-			WARN_ON(!(val & SPLL_PLL_ENABLE));
-			I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-			POSTING_READ(SPLL_CTL);
-		}
-		break;
-	case PORT_CLK_SEL_WRPLL1:
-		plls->wrpll1_refcount--;
-		if (plls->wrpll1_refcount == 0) {
-			DRM_DEBUG_KMS("Disabling WRPLL 1\n");
-			val = I915_READ(WRPLL_CTL1);
-			WARN_ON(!(val & WRPLL_PLL_ENABLE));
-			I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
-			POSTING_READ(WRPLL_CTL1);
-		}
-		break;
-	case PORT_CLK_SEL_WRPLL2:
-		plls->wrpll2_refcount--;
-		if (plls->wrpll2_refcount == 0) {
-			DRM_DEBUG_KMS("Disabling WRPLL 2\n");
-			val = I915_READ(WRPLL_CTL2);
-			WARN_ON(!(val & WRPLL_PLL_ENABLE));
-			I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
-			POSTING_READ(WRPLL_CTL2);
-		}
-		break;
-	}
-
-	WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
-	WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
-	WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
-
-	intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-}
-
 #define LC_FREQ 2700
 #define LC_FREQ_2K (LC_FREQ * 2000)
 
@@ -592,9 +561,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
 	u32 wrpll;
 
 	wrpll = I915_READ(reg);
-	switch (wrpll & SPLL_PLL_REF_MASK) {
-	case SPLL_PLL_SSC:
-	case SPLL_PLL_NON_SSC:
+	switch (wrpll & WRPLL_PLL_REF_MASK) {
+	case WRPLL_PLL_SSC:
+	case WRPLL_PLL_NON_SSC:
 		/*
 		 * We could calculate spread here, but our checking
 		 * code only cares about 5% accuracy, and spread is a max of
@@ -602,7 +571,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
 		 */
 		refclk = 135;
 		break;
-	case SPLL_PLL_LCPLL:
+	case WRPLL_PLL_LCPLL:
 		refclk = LC_FREQ;
 		break;
 	default:
@@ -618,15 +587,14 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
 	return (refclk * n * 100) / (p * r);
 }
 
-static void intel_ddi_clock_get(struct intel_encoder *encoder,
-				struct intel_crtc_config *pipe_config)
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+			 struct intel_crtc_config *pipe_config)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-	enum port port = intel_ddi_get_encoder_port(encoder);
 	int link_clock = 0;
 	u32 val, pll;
 
-	val = I915_READ(PORT_CLK_SEL(port));
+	val = pipe_config->ddi_pll_sel;
 	switch (val & PORT_CLK_SEL_MASK) {
 	case PORT_CLK_SEL_LCPLL_810:
 		link_clock = 81000;
@@ -750,173 +718,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
 	int type = intel_encoder->type;
-	enum pipe pipe = intel_crtc->pipe;
 	int clock = intel_crtc->config.port_clock;
 
-	intel_ddi_put_crtc_pll(crtc);
-
-	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	intel_put_shared_dpll(intel_crtc);
 
-		switch (intel_dp->link_bw) {
-		case DP_LINK_BW_1_62:
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
-			break;
-		case DP_LINK_BW_2_7:
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
-			break;
-		case DP_LINK_BW_5_4:
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
-			break;
-		default:
-			DRM_ERROR("Link bandwidth %d unsupported\n",
-				  intel_dp->link_bw);
-			return false;
-		}
-
-	} else if (type == INTEL_OUTPUT_HDMI) {
-		uint32_t reg, val;
+	if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_shared_dpll *pll;
+		uint32_t val;
 		unsigned p, n2, r2;
 
 		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
-		val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+		val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
 		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
 		      WRPLL_DIVIDER_POST(p);
 
-		if (val == I915_READ(WRPLL_CTL1)) {
-			DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL1;
-		} else if (val == I915_READ(WRPLL_CTL2)) {
-			DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL2;
-		} else if (plls->wrpll1_refcount == 0) {
-			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL1;
-		} else if (plls->wrpll2_refcount == 0) {
-			DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
-				      pipe_name(pipe));
-			reg = WRPLL_CTL2;
-		} else {
-			DRM_ERROR("No WRPLLs available!\n");
-			return false;
-		}
-
-		DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
-			      clock, p, n2, r2);
-
-		if (reg == WRPLL_CTL1) {
-			plls->wrpll1_refcount++;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
-		} else {
-			plls->wrpll2_refcount++;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
-		}
+		intel_crtc->config.dpll_hw_state.wrpll = val;
 
-	} else if (type == INTEL_OUTPUT_ANALOG) {
-		if (plls->spll_refcount == 0) {
-			DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
-				      pipe_name(pipe));
-			plls->spll_refcount++;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
-		} else {
-			DRM_ERROR("SPLL already in use\n");
+		pll = intel_get_shared_dpll(intel_crtc);
+		if (pll == NULL) {
+			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+					 pipe_name(intel_crtc->pipe));
 			return false;
 		}
 
-	} else {
-		WARN(1, "Invalid DDI encoder type %d\n", type);
-		return false;
+		intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
 	}
 
 	return true;
 }
 
-/*
- * To be called after intel_ddi_pll_select(). That one selects the PLL to be
- * used, this one actually enables the PLL.
- */
-void intel_ddi_pll_enable(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
-	int clock = crtc->config.port_clock;
-	uint32_t reg, cur_val, new_val;
-	int refcount;
-	const char *pll_name;
-	uint32_t enable_bit = (1 << 31);
-	unsigned int p, n2, r2;
-
-	BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
-	BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
-
-	switch (crtc->ddi_pll_sel) {
-	case PORT_CLK_SEL_LCPLL_2700:
-	case PORT_CLK_SEL_LCPLL_1350:
-	case PORT_CLK_SEL_LCPLL_810:
-		/*
-		 * LCPLL should always be enabled at this point of the mode set
-		 * sequence, so nothing to do.
-		 */
-		return;
-
-	case PORT_CLK_SEL_SPLL:
-		pll_name = "SPLL";
-		reg = SPLL_CTL;
-		refcount = plls->spll_refcount;
-		new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
-			  SPLL_PLL_SSC;
-		break;
-
-	case PORT_CLK_SEL_WRPLL1:
-	case PORT_CLK_SEL_WRPLL2:
-		if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
-			pll_name = "WRPLL1";
-			reg = WRPLL_CTL1;
-			refcount = plls->wrpll1_refcount;
-		} else {
-			pll_name = "WRPLL2";
-			reg = WRPLL_CTL2;
-			refcount = plls->wrpll2_refcount;
-		}
-
-		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-		new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
-			  WRPLL_DIVIDER_REFERENCE(r2) |
-			  WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
-
-		break;
-
-	case PORT_CLK_SEL_NONE:
-		WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
-		return;
-	default:
-		WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
-		return;
-	}
-
-	cur_val = I915_READ(reg);
-
-	WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
-	if (refcount == 1) {
-		WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
-		I915_WRITE(reg, new_val);
-		POSTING_READ(reg);
-		udelay(20);
-	} else {
-		WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
-	}
-}
-
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -926,8 +758,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 	int type = intel_encoder->type;
 	uint32_t temp;
 
-	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
 		temp = TRANS_MSA_SYNC_CLK;
 		switch (intel_crtc->config.pipe_bpp) {
 		case 18:
@@ -949,6 +780,21 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 	}
 }
 
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+	uint32_t temp;
+	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	if (state == true)
+		temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+	else
+		temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
 void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -995,7 +841,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 		 * eDP when not using the panel fitter, and when not
 		 * using motion blur mitigation (which we don't
 		 * support). */
-		if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
+		if (IS_HASWELL(dev) &&
+		    (intel_crtc->config.pch_pfit.enabled ||
+		     intel_crtc->config.pch_pfit.force_thru))
 			temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
 		else
 			temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1026,7 +874,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 		   type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+		if (intel_dp->is_mst) {
+			temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+		} else
+			temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+		temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+	} else if (type == INTEL_OUTPUT_DP_MST) {
+		struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
+
+		if (intel_dp->is_mst) {
+			temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+		} else
+			temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
 		temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
 	} else {
@@ -1043,7 +903,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 	uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
 	uint32_t val = I915_READ(reg);
 
-	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
 	val |= TRANS_DDI_PORT_NONE;
 	I915_WRITE(reg, val);
 }
@@ -1082,8 +942,11 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 	case TRANS_DDI_MODE_SELECT_DP_SST:
 		if (type == DRM_MODE_CONNECTOR_eDP)
 			return true;
-	case TRANS_DDI_MODE_SELECT_DP_MST:
 		return (type == DRM_MODE_CONNECTOR_DisplayPort);
+	case TRANS_DDI_MODE_SELECT_DP_MST:
+		/* if the transcoder is in MST state then
+		 * connector isn't connected */
+		return false;
 
 	case TRANS_DDI_MODE_SELECT_FDI:
 		return (type == DRM_MODE_CONNECTOR_VGA);
@@ -1135,6 +998,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 
 		if ((tmp & TRANS_DDI_PORT_MASK)
 		    == TRANS_DDI_SELECT_PORT(port)) {
+			if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
+				return false;
+
 			*pipe = i;
 			return true;
 		}
@@ -1146,76 +1012,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 	return false;
 }
 
-static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
-				       enum pipe pipe)
-{
-	uint32_t temp, ret;
-	enum port port = I915_MAX_PORTS;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
-	int i;
-
-	if (cpu_transcoder == TRANSCODER_EDP) {
-		port = PORT_A;
-	} else {
-		temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
-		temp &= TRANS_DDI_PORT_MASK;
-
-		for (i = PORT_B; i <= PORT_E; i++)
-			if (temp == TRANS_DDI_SELECT_PORT(i))
-				port = i;
-	}
-
-	if (port == I915_MAX_PORTS) {
-		WARN(1, "Pipe %c enabled on an unknown port\n",
-		     pipe_name(pipe));
-		ret = PORT_CLK_SEL_NONE;
-	} else {
-		ret = I915_READ(PORT_CLK_SEL(port));
-		DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
-			      "0x%08x\n", pipe_name(pipe), port_name(port),
-			      ret);
-	}
-
-	return ret;
-}
-
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe pipe;
-	struct intel_crtc *intel_crtc;
-
-	dev_priv->ddi_plls.spll_refcount = 0;
-	dev_priv->ddi_plls.wrpll1_refcount = 0;
-	dev_priv->ddi_plls.wrpll2_refcount = 0;
-
-	for_each_pipe(pipe) {
-		intel_crtc =
-			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
-		if (!intel_crtc->active) {
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-			continue;
-		}
-
-		intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
-								 pipe);
-
-		switch (intel_crtc->ddi_pll_sel) {
-		case PORT_CLK_SEL_SPLL:
-			dev_priv->ddi_plls.spll_refcount++;
-			break;
-		case PORT_CLK_SEL_WRPLL1:
-			dev_priv->ddi_plls.wrpll1_refcount++;
-			break;
-		case PORT_CLK_SEL_WRPLL2:
-			dev_priv->ddi_plls.wrpll2_refcount++;
-			break;
-		}
-	}
-}
-
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
@@ -1261,17 +1057,13 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 		intel_edp_panel_on(intel_dp);
 	}
 
-	WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
-	I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
+	WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+	I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
 
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-		struct intel_digital_port *intel_dig_port =
-			enc_to_dig_port(encoder);
 
-		intel_dp->DP = intel_dig_port->saved_port_bits |
-			       DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
-		intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+		intel_ddi_init_dp_buf_reg(intel_encoder);
 
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 		intel_dp_start_link_train(intel_dp);
@@ -1418,10 +1210,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 	}
 }
 
+static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+			       struct intel_shared_dpll *pll)
+{
+	I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+	POSTING_READ(WRPLL_CTL(pll->id));
+	udelay(20);
+}
+
+static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+	POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+				     struct intel_shared_dpll *pll,
+				     struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	hw_state->wrpll = val;
+
+	return val & WRPLL_PLL_ENABLE;
+}
+
+static const char * const hsw_ddi_pll_names[] = {
+	"WRPLL 1",
+	"WRPLL 2",
+};
+
 void intel_ddi_pll_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t val = I915_READ(LCPLL_CTL);
+	int i;
+
+	dev_priv->num_shared_dpll = 2;
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		dev_priv->shared_dplls[i].id = i;
+		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+		dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
+		dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+		dev_priv->shared_dplls[i].get_hw_state =
+			hsw_ddi_pll_get_hw_state;
+	}
 
 	/* The LCPLL register should be turned on by the BIOS. For now let's
 	 * just check its state and print errors in case something is wrong.
@@ -1465,10 +1307,15 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
 		intel_wait_ddi_buf_idle(dev_priv, port);
 	}
 
-	val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+	val = DP_TP_CTL_ENABLE |
 	      DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
-	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
-		val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+	if (intel_dp->is_mst)
+		val |= DP_TP_CTL_MODE_MST;
+	else {
+		val |= DP_TP_CTL_MODE_SST;
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+			val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+	}
 	I915_WRITE(DP_TP_CTL(port), val);
 	POSTING_READ(DP_TP_CTL(port));
 
@@ -1507,11 +1354,16 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
 
 static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
-	int type = intel_encoder->type;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+	int type = intel_dig_port->base.type;
+
+	if (type != INTEL_OUTPUT_DISPLAYPORT &&
+	    type != INTEL_OUTPUT_EDP &&
+	    type != INTEL_OUTPUT_UNKNOWN) {
+		return;
+	}
 
-	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
-		intel_dp_check_link_status(intel_dp);
+	intel_dp_hot_plug(intel_encoder);
 }
 
 void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1663,15 +1515,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	struct intel_digital_port *intel_dig_port;
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
-	struct intel_connector *hdmi_connector = NULL;
-	struct intel_connector *dp_connector = NULL;
 	bool init_hdmi, init_dp;
 
 	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
 		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
 	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
 	if (!init_dp && !init_hdmi) {
-		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
 			      port_name(port));
 		init_hdmi = true;
 		init_dp = true;
@@ -1701,20 +1551,28 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 			   DDI_A_4_LANES);
 
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
 	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
-	if (init_dp)
-		dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
+	if (init_dp) {
+		if (!intel_ddi_init_dp_connector(intel_dig_port))
+			goto err;
+
+		intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+		dev_priv->hpd_irq_port[port] = intel_dig_port;
+	}
 
 	/* In theory we don't need the encoder->type check, but leave it just in
 	 * case we have some really bad VBTs... */
-	if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
-		hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
-
-	if (!dp_connector && !hdmi_connector) {
-		drm_encoder_cleanup(encoder);
-		kfree(intel_dig_port);
+	if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+		if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+			goto err;
 	}
+
+	return;
+
+err:
+	drm_encoder_cleanup(encoder);
+	kfree(intel_dig_port);
 }
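[Note: taken together, the intel_ddi.c hunks above retire the open-coded SPLL/WRPLL refcounting in favor of the shared-DPLL framework: a fixed table of PLLs, each with enable/disable/get_hw_state hooks, referenced through common get/put helpers. A reduced C sketch of that ownership pattern follows; the structs are simplified stand-ins for intel_shared_dpll, collapsing the driver's separate refcount/active accounting into one counter for illustration.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pll;

struct pll_ops {
	void (*enable)(struct pll *pll);
	void (*disable)(struct pll *pll);
	bool (*get_hw_state)(struct pll *pll, uint32_t *state);
};

struct pll {
	int id;
	const char *name;
	int refcount;		/* how many CRTCs reference this PLL */
	bool on;		/* whether the hardware PLL is running */
	const struct pll_ops *ops;
};

/* Take a reference; turn the hardware on only on the 0 -> 1 transition. */
static void pll_get(struct pll *pll)
{
	if (pll->refcount++ == 0 && !pll->on) {
		printf("enabling %s\n", pll->name);
		pll->ops->enable(pll);
		pll->on = true;
	}
}

/* Drop a reference; turn the hardware off on the last put. */
static void pll_put(struct pll *pll)
{
	if (--pll->refcount == 0 && pll->on) {
		printf("disabling %s\n", pll->name);
		pll->ops->disable(pll);
		pll->on = false;
	}
}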
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0be855ddf45..99eb7cad62a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -39,12 +39,45 @@
 #include "i915_trace.h"
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
 #include <linux/dma_remapping.h>
 
+/* Primary plane formats supported by all gen */
+#define COMMON_PRIMARY_FORMATS \
+	DRM_FORMAT_C8, \
+	DRM_FORMAT_RGB565, \
+	DRM_FORMAT_XRGB8888, \
+	DRM_FORMAT_ARGB8888
+
+/* Primary plane formats for gen <= 3 */
+static const uint32_t intel_primary_formats_gen2[] = {
+	COMMON_PRIMARY_FORMATS,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_ARGB1555,
+};
+
+/* Primary plane formats for gen >= 4 */
+static const uint32_t intel_primary_formats_gen4[] = {
+	COMMON_PRIMARY_FORMATS, \
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ABGR2101010,
+};
+
+/* Cursor formats */
+static const uint32_t intel_cursor_formats[] = {
+	DRM_FORMAT_ARGB8888,
+};
+
 #define DIV_ROUND_CLOSEST_ULL(ll, d) \
 ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
 
-static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_device *dev,
+				    enum pipe pipe);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -68,6 +101,14 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc);
 static void intel_set_pipe_csc(struct drm_crtc *crtc);
 static void vlv_prepare_pll(struct intel_crtc *crtc);
 
+static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
+{
+	if (!connector->mst_port)
+		return connector->encoder;
+	else
+		return &connector->mst_port->mst_encoders[pipe]->base;
+}
+
 typedef struct {
 	int min, max;
 } intel_range_t;
@@ -1061,11 +1102,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 	bool cur_state;
 	struct intel_dpll_hw_state hw_state;
 
-	if (HAS_PCH_LPT(dev_priv->dev)) {
-		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
-		return;
-	}
-
 	if (WARN (!pll,
 		  "asserting DPLL %s with no DPLL\n", state_string(state)))
 		return;
@@ -1481,9 +1517,6 @@ static void intel_reset_dpio(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_VALLEYVIEW(dev))
-		return;
-
 	if (IS_CHERRYVIEW(dev)) {
 		enum dpio_phy phy;
 		u32 val;
@@ -1505,26 +1538,6 @@ static void intel_reset_dpio(struct drm_device *dev)
 			I915_WRITE(DISPLAY_PHY_CONTROL,
 				   PHY_COM_LANE_RESET_DEASSERT(phy, val));
 		}
-
-	} else {
-		/*
-		 * If DPIO has already been reset, e.g. by BIOS, just skip all
-		 * this.
-		 */
-		if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
-			return;
-
-		/*
-		 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
-		 * Need to assert and de-assert PHY SB reset by gating the
-		 * common lane power, then un-gating it.
-		 * Simply ungating isn't enough to reset the PHY enough to get
-		 * ports and lanes running.
-		 */
-		__vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
-				     false);
-		__vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
-				     true);
 	}
 }
 
@@ -1712,6 +1725,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 	val &= ~DPIO_DCLKP_EN;
 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
 
+	/* disable left/right clock distribution */
+	if (pipe != PIPE_B) {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+	} else {
+		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+	}
+
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1749,6 +1773,9 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
+	if (WARN_ON(pll == NULL))
+		return;
+
 	WARN_ON(!pll->refcount);
 	if (pll->active == 0) {
 		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
@@ -1790,12 +1817,14 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
 	}
 	WARN_ON(pll->on);
 
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
 	DRM_DEBUG_KMS("enabling %s\n", pll->name);
 	pll->enable(dev_priv, pll);
 	pll->on = true;
 }
 
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1826,6 +1855,8 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
 	DRM_DEBUG_KMS("disabling %s\n", pll->name);
 	pll->disable(dev_priv, pll);
 	pll->on = false;
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 }
 
 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -2172,6 +2203,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	u32 alignment;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	switch (obj->tiling_mode) {
 	case I915_TILING_NONE:
 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2228,6 +2261,8 @@ err_interruptible:
 
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 {
+	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+
 	i915_gem_object_unpin_fence(obj);
 	i915_gem_object_unpin_from_display_plane(obj);
 }
@@ -2314,6 +2349,7 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
 		goto out_unref_obj;
 	}
 
+	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
@@ -2331,7 +2367,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_crtc *c;
 	struct intel_crtc *i;
-	struct intel_framebuffer *fb;
+	struct drm_i915_gem_object *obj;
 
 	if (!intel_crtc->base.primary->fb)
 		return;
@@ -2352,13 +2388,17 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
 		if (c == &intel_crtc->base)
 			continue;
 
-		if (!i->active || !c->primary->fb)
+		if (!i->active)
+			continue;
+
+		obj = intel_fb_obj(c->primary->fb);
+		if (obj == NULL)
 			continue;
 
-		fb = to_intel_framebuffer(c->primary->fb);
-		if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
 			drm_framebuffer_reference(c->primary->fb);
 			intel_crtc->base.primary->fb = c->primary->fb;
+			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 			break;
 		}
 	}
@@ -2371,16 +2411,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int plane = intel_crtc->plane;
 	unsigned long linear_offset;
 	u32 dspcntr;
 	u32 reg;
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
 	reg = DSPCNTR(plane);
 	dspcntr = I915_READ(reg);
 	/* Mask out pixel format bits in case we change it */
@@ -2461,16 +2497,12 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int plane = intel_crtc->plane;
 	unsigned long linear_offset;
 	u32 dspcntr;
 	u32 reg;
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
 	reg = DSPCNTR(plane);
 	dspcntr = I915_READ(reg);
 	/* Mask out pixel format bits in case we change it */
@@ -2546,7 +2578,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
 	if (dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
-	intel_increase_pllclock(crtc);
+	intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -2601,7 +2633,7 @@ void intel_display_handle_reset(struct drm_device *dev)
 static int
 intel_finish_fb(struct drm_framebuffer *old_fb)
 {
-	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	bool was_interruptible = dev_priv->mm.interruptible;
 	int ret;
@@ -2647,7 +2679,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_framebuffer *old_fb;
+	enum pipe pipe = intel_crtc->pipe;
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
 	int ret;
 
 	if (intel_crtc_has_pending_flip(crtc)) {
@@ -2669,9 +2704,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev,
-					 to_intel_framebuffer(fb)->obj,
-					 NULL);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+	if (ret == 0)
+		i915_gem_track_fb(old_obj, obj,
+				  INTEL_FRONTBUFFER_PRIMARY(pipe));
 	mutex_unlock(&dev->struct_mutex);
 	if (ret != 0) {
 		DRM_ERROR("pin & fence failed\n");
@@ -2711,7 +2747,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
-	old_fb = crtc->primary->fb;
+	if (intel_crtc->active)
+		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
 	crtc->primary->fb = fb;
 	crtc->x = x;
 	crtc->y = y;
@@ -2720,13 +2758,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		if (intel_crtc->active && old_fb != fb)
 			intel_wait_for_vblank(dev, intel_crtc->pipe);
 		mutex_lock(&dev->struct_mutex);
-		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+		intel_unpin_fb_obj(old_obj);
 		mutex_unlock(&dev->struct_mutex);
 	}
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
-	intel_edp_psr_update(dev);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -3587,7 +3624,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
-static void intel_put_shared_dpll(struct intel_crtc *crtc)
+void intel_put_shared_dpll(struct intel_crtc *crtc)
 {
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
@@ -3607,7 +3644,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
 	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
 }
 
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3818,7 +3855,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
 	}
 
 	/* use legacy palette for Ironlake */
-	if (HAS_PCH_SPLIT(dev))
+	if (!HAS_GMCH_DISPLAY(dev))
 		palreg = LGC_PALETTE(pipe);
 
 	/* Workaround : Do not read or write the pipe palette/gamma data while
@@ -3860,30 +3897,6 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 	 */
 }
 
-/**
- * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
- * cursor plane briefly if not already running after enabling the display
- * plane.
- * This workaround avoids occasional blank screens when self refresh is
- * enabled.
- */
-static void
-g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
-	u32 cntl = I915_READ(CURCNTR(pipe));
-
-	if ((cntl & CURSOR_MODE) == 0) {
-		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
-
-		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
-		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
-		intel_wait_for_vblank(dev_priv->dev, pipe);
-		I915_WRITE(CURCNTR(pipe), cntl);
-		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
-		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
-	}
-}
-
 static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3892,11 +3905,10 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 
+	drm_vblank_on(dev, pipe);
+
 	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
-	/* The fixup needs to happen before cursor is enabled */
-	if (IS_G4X(dev))
-		g4x_fixup_plane(dev_priv, pipe);
 	intel_crtc_update_cursor(crtc, true);
 	intel_crtc_dpms_overlay(intel_crtc, true);
 
@@ -3904,8 +3916,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
-	intel_edp_psr_update(dev);
 	mutex_unlock(&dev->struct_mutex);
+
+	/*
+	 * FIXME: Once we grow proper nuclear flip support out of this we need
+	 * to compute the mask of flip planes precisely. For the time being
+	 * consider this a flip from a NULL plane.
+	 */
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
 }
 
 static void intel_crtc_disable_planes(struct drm_crtc *crtc)
@@ -3917,7 +3935,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	int plane = intel_crtc->plane;
 
 	intel_crtc_wait_for_pending_flips(crtc);
-	drm_crtc_vblank_off(crtc);
 
 	if (dev_priv->fbc.plane == plane)
 		intel_disable_fbc(dev);
@@ -3928,6 +3945,15 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	intel_crtc_update_cursor(crtc, false);
 	intel_disable_planes(crtc);
 	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+
+	/*
+	 * FIXME: Once we grow proper nuclear flip support out of this we need
+	 * to compute the mask of flip planes precisely. For the time being
+	 * consider this a flip to a NULL plane.
+	 */
+	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+
+	drm_vblank_off(dev, pipe);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4006,8 +4032,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4006 cpt_verify_modeset(dev, intel_crtc->pipe); 4032 cpt_verify_modeset(dev, intel_crtc->pipe);
4007 4033
4008 intel_crtc_enable_planes(crtc); 4034 intel_crtc_enable_planes(crtc);
4009
4010 drm_crtc_vblank_on(crtc);
4011} 4035}
4012 4036
4013/* IPS only exists on ULT machines and is tied to pipe A. */ 4037/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4059,6 +4083,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4059 if (intel_crtc->active) 4083 if (intel_crtc->active)
4060 return; 4084 return;
4061 4085
4086 if (intel_crtc_to_shared_dpll(intel_crtc))
4087 intel_enable_shared_dpll(intel_crtc);
4088
4062 if (intel_crtc->config.has_dp_encoder) 4089 if (intel_crtc->config.has_dp_encoder)
4063 intel_dp_set_m_n(intel_crtc); 4090 intel_dp_set_m_n(intel_crtc);
4064 4091
@@ -4083,16 +4110,15 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4083 intel_crtc->active = true; 4110 intel_crtc->active = true;
4084 4111
4085 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4112 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4086 if (intel_crtc->config.has_pch_encoder)
4087 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4088
4089 if (intel_crtc->config.has_pch_encoder)
4090 dev_priv->display.fdi_link_train(crtc);
4091
4092 for_each_encoder_on_crtc(dev, crtc, encoder) 4113 for_each_encoder_on_crtc(dev, crtc, encoder)
4093 if (encoder->pre_enable) 4114 if (encoder->pre_enable)
4094 encoder->pre_enable(encoder); 4115 encoder->pre_enable(encoder);
4095 4116
4117 if (intel_crtc->config.has_pch_encoder) {
4118 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4119 dev_priv->display.fdi_link_train(crtc);
4120 }
4121
4096 intel_ddi_enable_pipe_clock(intel_crtc); 4122 intel_ddi_enable_pipe_clock(intel_crtc);
4097 4123
4098 ironlake_pfit_enable(intel_crtc); 4124 ironlake_pfit_enable(intel_crtc);
@@ -4112,6 +4138,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4112 if (intel_crtc->config.has_pch_encoder) 4138 if (intel_crtc->config.has_pch_encoder)
4113 lpt_pch_enable(crtc); 4139 lpt_pch_enable(crtc);
4114 4140
4141 if (intel_crtc->config.dp_encoder_is_mst)
4142 intel_ddi_set_vc_payload_alloc(crtc, true);
4143
4115 for_each_encoder_on_crtc(dev, crtc, encoder) { 4144 for_each_encoder_on_crtc(dev, crtc, encoder) {
4116 encoder->enable(encoder); 4145 encoder->enable(encoder);
4117 intel_opregion_notify_encoder(encoder, true); 4146 intel_opregion_notify_encoder(encoder, true);
@@ -4121,8 +4150,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4121 * to change the workaround. */ 4150 * to change the workaround. */
4122 haswell_mode_set_planes_workaround(intel_crtc); 4151 haswell_mode_set_planes_workaround(intel_crtc);
4123 intel_crtc_enable_planes(crtc); 4152 intel_crtc_enable_planes(crtc);
4124
4125 drm_crtc_vblank_on(crtc);
4126} 4153}
4127 4154
4128static void ironlake_pfit_disable(struct intel_crtc *crtc) 4155static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -4162,6 +4189,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4162 4189
4163 intel_disable_pipe(dev_priv, pipe); 4190 intel_disable_pipe(dev_priv, pipe);
4164 4191
4192 if (intel_crtc->config.dp_encoder_is_mst)
4193 intel_ddi_set_vc_payload_alloc(crtc, false);
4194
4165 ironlake_pfit_disable(intel_crtc); 4195 ironlake_pfit_disable(intel_crtc);
4166 4196
4167 for_each_encoder_on_crtc(dev, crtc, encoder) 4197 for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4200,7 +4230,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4200 4230
4201 mutex_lock(&dev->struct_mutex); 4231 mutex_lock(&dev->struct_mutex);
4202 intel_update_fbc(dev); 4232 intel_update_fbc(dev);
4203 intel_edp_psr_update(dev);
4204 mutex_unlock(&dev->struct_mutex); 4233 mutex_unlock(&dev->struct_mutex);
4205} 4234}
4206 4235
@@ -4233,23 +4262,25 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4233 4262
4234 intel_ddi_disable_pipe_clock(intel_crtc); 4263 intel_ddi_disable_pipe_clock(intel_crtc);
4235 4264
4236 for_each_encoder_on_crtc(dev, crtc, encoder)
4237 if (encoder->post_disable)
4238 encoder->post_disable(encoder);
4239
4240 if (intel_crtc->config.has_pch_encoder) { 4265 if (intel_crtc->config.has_pch_encoder) {
4241 lpt_disable_pch_transcoder(dev_priv); 4266 lpt_disable_pch_transcoder(dev_priv);
4242 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); 4267 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4243 intel_ddi_fdi_disable(crtc); 4268 intel_ddi_fdi_disable(crtc);
4244 } 4269 }
4245 4270
4271 for_each_encoder_on_crtc(dev, crtc, encoder)
4272 if (encoder->post_disable)
4273 encoder->post_disable(encoder);
4274
4246 intel_crtc->active = false; 4275 intel_crtc->active = false;
4247 intel_update_watermarks(crtc); 4276 intel_update_watermarks(crtc);
4248 4277
4249 mutex_lock(&dev->struct_mutex); 4278 mutex_lock(&dev->struct_mutex);
4250 intel_update_fbc(dev); 4279 intel_update_fbc(dev);
4251 intel_edp_psr_update(dev);
4252 mutex_unlock(&dev->struct_mutex); 4280 mutex_unlock(&dev->struct_mutex);
4281
4282 if (intel_crtc_to_shared_dpll(intel_crtc))
4283 intel_disable_shared_dpll(intel_crtc);
4253} 4284}
4254 4285
4255static void ironlake_crtc_off(struct drm_crtc *crtc) 4286static void ironlake_crtc_off(struct drm_crtc *crtc)
@@ -4258,10 +4289,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc)
4258 intel_put_shared_dpll(intel_crtc); 4289 intel_put_shared_dpll(intel_crtc);
4259} 4290}
4260 4291
4261static void haswell_crtc_off(struct drm_crtc *crtc)
4262{
4263 intel_ddi_put_crtc_pll(crtc);
4264}
4265 4292
4266static void i9xx_pfit_enable(struct intel_crtc *crtc) 4293static void i9xx_pfit_enable(struct intel_crtc *crtc)
4267{ 4294{
@@ -4287,6 +4314,23 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
4287 I915_WRITE(BCLRPAT(crtc->pipe), 0); 4314 I915_WRITE(BCLRPAT(crtc->pipe), 0);
4288} 4315}
4289 4316
4317static enum intel_display_power_domain port_to_power_domain(enum port port)
4318{
4319 switch (port) {
4320 case PORT_A:
4321 return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4322 case PORT_B:
4323 return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4324 case PORT_C:
4325 return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4326 case PORT_D:
4327 return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4328 default:
4329 WARN_ON_ONCE(1);
4330 return POWER_DOMAIN_PORT_OTHER;
4331 }
4332}
4333
4290#define for_each_power_domain(domain, mask) \ 4334#define for_each_power_domain(domain, mask) \
4291 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ 4335 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
4292 if ((1 << (domain)) & (mask)) 4336 if ((1 << (domain)) & (mask))
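[ Ed. for_each_power_domain() is nothing more than a filtered loop over
the set bits of a domain mask. A compilable userspace rendering of the
same shape; the domain count and the mask values are illustrative. ]

#include <stdio.h>

#define POWER_DOMAIN_NUM 8

#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))

int main(void)
{
	unsigned long mask = (1 << 3) | (1 << 7); /* e.g. pipe + transcoder */
	int domain;

	for_each_power_domain(domain, mask)
		printf("take reference on domain %d\n", domain);
	return 0;
}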
@@ -4305,19 +4349,10 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4305 case INTEL_OUTPUT_HDMI: 4349 case INTEL_OUTPUT_HDMI:
4306 case INTEL_OUTPUT_EDP: 4350 case INTEL_OUTPUT_EDP:
4307 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 4351 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4308 switch (intel_dig_port->port) { 4352 return port_to_power_domain(intel_dig_port->port);
4309 case PORT_A: 4353 case INTEL_OUTPUT_DP_MST:
4310 return POWER_DOMAIN_PORT_DDI_A_4_LANES; 4354 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
4311 case PORT_B: 4355 return port_to_power_domain(intel_dig_port->port);
4312 return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4313 case PORT_C:
4314 return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4315 case PORT_D:
4316 return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4317 default:
4318 WARN_ON_ONCE(1);
4319 return POWER_DOMAIN_PORT_OTHER;
4320 }
4321 case INTEL_OUTPUT_ANALOG: 4356 case INTEL_OUTPUT_ANALOG:
4322 return POWER_DOMAIN_PORT_CRT; 4357 return POWER_DOMAIN_PORT_CRT;
4323 case INTEL_OUTPUT_DSI: 4358 case INTEL_OUTPUT_DSI:
@@ -4333,7 +4368,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4333 struct intel_encoder *intel_encoder; 4368 struct intel_encoder *intel_encoder;
4334 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4369 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4335 enum pipe pipe = intel_crtc->pipe; 4370 enum pipe pipe = intel_crtc->pipe;
4336 bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
4337 unsigned long mask; 4371 unsigned long mask;
4338 enum transcoder transcoder; 4372 enum transcoder transcoder;
4339 4373
@@ -4341,7 +4375,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4341 4375
4342 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 4376 mask = BIT(POWER_DOMAIN_PIPE(pipe));
4343 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 4377 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4344 if (pfit_enabled) 4378 if (intel_crtc->config.pch_pfit.enabled ||
4379 intel_crtc->config.pch_pfit.force_thru)
4345 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 4380 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4346 4381
4347 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 4382 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4398,7 +4433,8 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
4398 intel_display_set_init_power(dev_priv, false); 4433 intel_display_set_init_power(dev_priv, false);
4399} 4434}
4400 4435
4401int valleyview_get_vco(struct drm_i915_private *dev_priv) 4436/* returns HPLL frequency in kHz */
4437static int valleyview_get_vco(struct drm_i915_private *dev_priv)
4402{ 4438{
4403 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 4439 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
4404 4440
@@ -4408,7 +4444,23 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv)
4408 CCK_FUSE_HPLL_FREQ_MASK; 4444 CCK_FUSE_HPLL_FREQ_MASK;
4409 mutex_unlock(&dev_priv->dpio_lock); 4445 mutex_unlock(&dev_priv->dpio_lock);
4410 4446
4411 return vco_freq[hpll_freq]; 4447 return vco_freq[hpll_freq] * 1000;
4448}
4449
4450static void vlv_update_cdclk(struct drm_device *dev)
4451{
4452 struct drm_i915_private *dev_priv = dev->dev_private;
4453
4454 dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
4455 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
4456 dev_priv->vlv_cdclk_freq);
4457
4458 /*
4459 * Program the gmbus_freq based on the cdclk frequency.
4460 * BSpec erroneously claims we should aim for 4MHz, but
4461 * in fact 1MHz is the correct frequency.
4462 */
4463 I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
4412} 4464}
4413 4465
4414/* Adjust CDclk dividers to allow high res or save power if possible */ 4466/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -4417,12 +4469,11 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4417 struct drm_i915_private *dev_priv = dev->dev_private; 4469 struct drm_i915_private *dev_priv = dev->dev_private;
4418 u32 val, cmd; 4470 u32 val, cmd;
4419 4471
4420 WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq); 4472 WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4421 dev_priv->vlv_cdclk_freq = cdclk;
4422 4473
4423 if (cdclk >= 320) /* jump to highest voltage for 400MHz too */ 4474 if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
4424 cmd = 2; 4475 cmd = 2;
4425 else if (cdclk == 266) 4476 else if (cdclk == 266667)
4426 cmd = 1; 4477 cmd = 1;
4427 else 4478 else
4428 cmd = 0; 4479 cmd = 0;
@@ -4439,18 +4490,23 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4439 } 4490 }
4440 mutex_unlock(&dev_priv->rps.hw_lock); 4491 mutex_unlock(&dev_priv->rps.hw_lock);
4441 4492
4442 if (cdclk == 400) { 4493 if (cdclk == 400000) {
4443 u32 divider, vco; 4494 u32 divider, vco;
4444 4495
4445 vco = valleyview_get_vco(dev_priv); 4496 vco = valleyview_get_vco(dev_priv);
4446 divider = ((vco << 1) / cdclk) - 1; 4497 divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
4447 4498
4448 mutex_lock(&dev_priv->dpio_lock); 4499 mutex_lock(&dev_priv->dpio_lock);
4449 /* adjust cdclk divider */ 4500 /* adjust cdclk divider */
4450 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 4501 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4451 val &= ~0xf; 4502 val &= ~DISPLAY_FREQUENCY_VALUES;
4452 val |= divider; 4503 val |= divider;
4453 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); 4504 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
4505
4506 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
4507 DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
4508 50))
4509 DRM_ERROR("timed out waiting for CDclk change\n");
4454 mutex_unlock(&dev_priv->dpio_lock); 4510 mutex_unlock(&dev_priv->dpio_lock);
4455 } 4511 }
4456 4512
@@ -4463,54 +4519,43 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4463 * For high bandwidth configs, we set a higher latency in the bunit 4519 * For high bandwidth configs, we set a higher latency in the bunit
4464 * so that the core display fetch happens in time to avoid underruns. 4520 * so that the core display fetch happens in time to avoid underruns.
4465 */ 4521 */
4466 if (cdclk == 400) 4522 if (cdclk == 400000)
4467 val |= 4500 / 250; /* 4.5 usec */ 4523 val |= 4500 / 250; /* 4.5 usec */
4468 else 4524 else
4469 val |= 3000 / 250; /* 3.0 usec */ 4525 val |= 3000 / 250; /* 3.0 usec */
4470 vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val); 4526 vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4471 mutex_unlock(&dev_priv->dpio_lock); 4527 mutex_unlock(&dev_priv->dpio_lock);
4472 4528
4473 /* Since we changed the CDclk, we need to update the GMBUSFREQ too */ 4529 vlv_update_cdclk(dev);
4474 intel_i2c_reset(dev);
4475}
4476
4477int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
4478{
4479 int cur_cdclk, vco;
4480 int divider;
4481
4482 vco = valleyview_get_vco(dev_priv);
4483
4484 mutex_lock(&dev_priv->dpio_lock);
4485 divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4486 mutex_unlock(&dev_priv->dpio_lock);
4487
4488 divider &= 0xf;
4489
4490 cur_cdclk = (vco << 1) / (divider + 1);
4491
4492 return cur_cdclk;
4493} 4530}
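[ Ed. With valleyview_get_vco() now returning kHz, the divider math in
valleyview_set_cdclk() and the readback in the reworked
valleyview_get_display_clock_speed() further down work in one unit end
to end. A standalone round-trip check, assuming the HPLL fuse selects
1600 MHz: ]

#include <stdio.h>

/* positive-operand stand-in for the kernel's DIV_ROUND_CLOSEST() */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int vco = 1600 * 1000;	/* HPLL frequency in kHz */
	int cdclk = 400000;	/* target CDclk in kHz */

	/* divider programmed into CCK_DISPLAY_CLOCK_CONTROL */
	int divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;

	/* what the readback path computes from that divider */
	int readback = DIV_ROUND_CLOSEST(vco << 1, divider + 1);

	printf("divider=%d readback=%d kHz\n", divider, readback); /* 7, 400000 */
	return 0;
}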
4494 4531
4495static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, 4532static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4496 int max_pixclk) 4533 int max_pixclk)
4497{ 4534{
4535 int vco = valleyview_get_vco(dev_priv);
4536 int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
4537
4498 /* 4538 /*
4499 * Really only a few cases to deal with, as only 4 CDclks are supported: 4539 * Really only a few cases to deal with, as only 4 CDclks are supported:
4500 * 200MHz 4540 * 200MHz
4501 * 267MHz 4541 * 267MHz
4502 * 320MHz 4542 * 320/333MHz (depends on HPLL freq)
4503 * 400MHz 4543 * 400MHz
4504 * So we check to see whether we're above 90% of the lower bin and 4544 * So we check to see whether we're above 90% of the lower bin and
4505 * adjust if needed. 4545 * adjust if needed.
4546 *
4547 * We seem to get an unstable or solid color picture at 200MHz.
4548 * Not sure what's wrong. For now use 200MHz only when all pipes
4549 * are off.
4506 */ 4550 */
4507 if (max_pixclk > 288000) { 4551 if (max_pixclk > freq_320*9/10)
4508 return 400; 4552 return 400000;
4509 } else if (max_pixclk > 240000) { 4553 else if (max_pixclk > 266667*9/10)
4510 return 320; 4554 return freq_320;
4511 } else 4555 else if (max_pixclk > 0)
4512 return 266; 4556 return 266667;
4513 /* Looks like the 200MHz CDclk freq doesn't work on some configs */ 4557 else
4558 return 200000;
4514} 4559}
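[ Ed. A worked example of the bin selection, compilable as-is; whether
the middle bin is 320 or 333 MHz falls out of whether 2*vco divides
evenly by 320000 (all values in kHz, mirroring the function above): ]

#include <stdio.h>

static int calc_cdclk(int vco, int max_pixclk)
{
	int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;

	if (max_pixclk > freq_320 * 9 / 10)
		return 400000;
	else if (max_pixclk > 266667 * 9 / 10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

int main(void)
{
	printf("%d\n", calc_cdclk(1600000, 148500)); /* 1080p fits in 266667 */
	printf("%d\n", calc_cdclk(2000000, 250000)); /* lands in the 333333 bin */
	printf("%d\n", calc_cdclk(1600000, 0));      /* all pipes off: 200000 */
	return 0;
}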
4515 4560
4516/* compute the max pixel clock for new configuration */ 4561/* compute the max pixel clock for new configuration */
@@ -4633,8 +4678,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4633 4678
4634 intel_crtc_enable_planes(crtc); 4679 intel_crtc_enable_planes(crtc);
4635 4680
4636 drm_crtc_vblank_on(crtc);
4637
4638 /* Underruns don't raise interrupts, so check manually. */ 4681 /* Underruns don't raise interrupts, so check manually. */
4639 i9xx_check_fifo_underruns(dev); 4682 i9xx_check_fifo_underruns(dev);
4640} 4683}
@@ -4727,8 +4770,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4727 if (IS_GEN2(dev)) 4770 if (IS_GEN2(dev))
4728 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4771 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4729 4772
4730 drm_crtc_vblank_on(crtc);
4731
4732 /* Underruns don't raise interrupts, so check manually. */ 4773 /* Underruns don't raise interrupts, so check manually. */
4733 i9xx_check_fifo_underruns(dev); 4774 i9xx_check_fifo_underruns(dev);
4734} 4775}
@@ -4768,6 +4809,16 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4768 if (IS_GEN2(dev)) 4809 if (IS_GEN2(dev))
4769 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); 4810 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4770 4811
4812 /*
4813 * Vblank time updates from the shadow to live plane control register
4814 * are blocked if the memory self-refresh mode is active at that
4815 * moment. So to make sure the plane gets truly disabled, disable
4816 * first the self-refresh mode. The self-refresh enable bit in turn
4817 * will be checked/applied by the HW only at the next frame start
4818 * event which is after the vblank start event, so we need to have a
4819 * wait-for-vblank between disabling the plane and the pipe.
4820 */
4821 intel_set_memory_cxsr(dev_priv, false);
4771 intel_crtc_disable_planes(crtc); 4822 intel_crtc_disable_planes(crtc);
4772 4823
4773 for_each_encoder_on_crtc(dev, crtc, encoder) 4824 for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4776,9 +4827,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4776 /* 4827 /*
4777 * On gen2 planes are double buffered but the pipe isn't, so we must 4828 * On gen2 planes are double buffered but the pipe isn't, so we must
4778 * wait for planes to fully turn off before disabling the pipe. 4829 * wait for planes to fully turn off before disabling the pipe.
4830 * We also need to wait on all gmch platforms because of the
4831 * self-refresh mode constraint explained above.
4779 */ 4832 */
4780 if (IS_GEN2(dev)) 4833 intel_wait_for_vblank(dev, pipe);
4781 intel_wait_for_vblank(dev, pipe);
4782 4834
4783 intel_disable_pipe(dev_priv, pipe); 4835 intel_disable_pipe(dev_priv, pipe);
4784 4836
@@ -4805,7 +4857,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4805 4857
4806 mutex_lock(&dev->struct_mutex); 4858 mutex_lock(&dev->struct_mutex);
4807 intel_update_fbc(dev); 4859 intel_update_fbc(dev);
4808 intel_edp_psr_update(dev);
4809 mutex_unlock(&dev->struct_mutex); 4860 mutex_unlock(&dev->struct_mutex);
4810} 4861}
4811 4862
@@ -4843,23 +4894,49 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
4843 } 4894 }
4844} 4895}
4845 4896
4897/* Master function to enable/disable CRTC and corresponding power wells */
4898void intel_crtc_control(struct drm_crtc *crtc, bool enable)
4899{
4900 struct drm_device *dev = crtc->dev;
4901 struct drm_i915_private *dev_priv = dev->dev_private;
4902 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4903 enum intel_display_power_domain domain;
4904 unsigned long domains;
4905
4906 if (enable) {
4907 if (!intel_crtc->active) {
4908 domains = get_crtc_power_domains(crtc);
4909 for_each_power_domain(domain, domains)
4910 intel_display_power_get(dev_priv, domain);
4911 intel_crtc->enabled_power_domains = domains;
4912
4913 dev_priv->display.crtc_enable(crtc);
4914 }
4915 } else {
4916 if (intel_crtc->active) {
4917 dev_priv->display.crtc_disable(crtc);
4918
4919 domains = intel_crtc->enabled_power_domains;
4920 for_each_power_domain(domain, domains)
4921 intel_display_power_put(dev_priv, domain);
4922 intel_crtc->enabled_power_domains = 0;
4923 }
4924 }
4925}
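[ Ed. The point of stashing enabled_power_domains is that disable puts
back exactly the references enable took, even if the computed mask has
changed in between. A toy model of that symmetry; every name here is
illustrative, not kernel API. ]

#include <stdio.h>
#include <stdbool.h>

static int refcount[32];

struct toy_crtc {
	bool active;
	unsigned long enabled_power_domains;
};

static void toy_crtc_control(struct toy_crtc *crtc, bool enable,
			     unsigned long domains_now)
{
	int d;

	if (enable && !crtc->active) {
		for (d = 0; d < 32; d++)
			if (domains_now & (1UL << d))
				refcount[d]++;
		crtc->enabled_power_domains = domains_now;
		crtc->active = true;
	} else if (!enable && crtc->active) {
		crtc->active = false;
		for (d = 0; d < 32; d++)
			if (crtc->enabled_power_domains & (1UL << d))
				refcount[d]--;
		crtc->enabled_power_domains = 0;
	}
}

int main(void)
{
	struct toy_crtc crtc = { false, 0 };

	toy_crtc_control(&crtc, true, 0x9);  /* takes domains 0 and 3 */
	toy_crtc_control(&crtc, false, 0x3); /* still puts 0 and 3 */
	printf("%d %d\n", refcount[0], refcount[3]); /* 0 0: balanced */
	return 0;
}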
4926
4846/** 4927/**
4847 * Sets the power management mode of the pipe and plane. 4928 * Sets the power management mode of the pipe and plane.
4848 */ 4929 */
4849void intel_crtc_update_dpms(struct drm_crtc *crtc) 4930void intel_crtc_update_dpms(struct drm_crtc *crtc)
4850{ 4931{
4851 struct drm_device *dev = crtc->dev; 4932 struct drm_device *dev = crtc->dev;
4852 struct drm_i915_private *dev_priv = dev->dev_private;
4853 struct intel_encoder *intel_encoder; 4933 struct intel_encoder *intel_encoder;
4854 bool enable = false; 4934 bool enable = false;
4855 4935
4856 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 4936 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4857 enable |= intel_encoder->connectors_active; 4937 enable |= intel_encoder->connectors_active;
4858 4938
4859 if (enable) 4939 intel_crtc_control(crtc, enable);
4860 dev_priv->display.crtc_enable(crtc);
4861 else
4862 dev_priv->display.crtc_disable(crtc);
4863 4940
4864 intel_crtc_update_sarea(crtc, enable); 4941 intel_crtc_update_sarea(crtc, enable);
4865} 4942}
@@ -4869,6 +4946,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
4869 struct drm_device *dev = crtc->dev; 4946 struct drm_device *dev = crtc->dev;
4870 struct drm_connector *connector; 4947 struct drm_connector *connector;
4871 struct drm_i915_private *dev_priv = dev->dev_private; 4948 struct drm_i915_private *dev_priv = dev->dev_private;
4949 struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
4950 enum pipe pipe = to_intel_crtc(crtc)->pipe;
4872 4951
4873 /* crtc should still be enabled when we disable it. */ 4952 /* crtc should still be enabled when we disable it. */
4874 WARN_ON(!crtc->enabled); 4953 WARN_ON(!crtc->enabled);
@@ -4877,13 +4956,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
4877 intel_crtc_update_sarea(crtc, false); 4956 intel_crtc_update_sarea(crtc, false);
4878 dev_priv->display.off(crtc); 4957 dev_priv->display.off(crtc);
4879 4958
4880 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4881 assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
4882 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
4883
4884 if (crtc->primary->fb) { 4959 if (crtc->primary->fb) {
4885 mutex_lock(&dev->struct_mutex); 4960 mutex_lock(&dev->struct_mutex);
4886 intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj); 4961 intel_unpin_fb_obj(old_obj);
4962 i915_gem_track_fb(old_obj, NULL,
4963 INTEL_FRONTBUFFER_PRIMARY(pipe));
4887 mutex_unlock(&dev->struct_mutex); 4964 mutex_unlock(&dev->struct_mutex);
4888 crtc->primary->fb = NULL; 4965 crtc->primary->fb = NULL;
4889 } 4966 }
@@ -4939,24 +5016,31 @@ static void intel_connector_check_state(struct intel_connector *connector)
4939 connector->base.base.id, 5016 connector->base.base.id,
4940 connector->base.name); 5017 connector->base.name);
4941 5018
5019 /* there is no real hw state for MST connectors */
5020 if (connector->mst_port)
5021 return;
5022
4942 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, 5023 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
4943 "wrong connector dpms state\n"); 5024 "wrong connector dpms state\n");
4944 WARN(connector->base.encoder != &encoder->base, 5025 WARN(connector->base.encoder != &encoder->base,
4945 "active connector not linked to encoder\n"); 5026 "active connector not linked to encoder\n");
4946 WARN(!encoder->connectors_active,
4947 "encoder->connectors_active not set\n");
4948 5027
4949 encoder_enabled = encoder->get_hw_state(encoder, &pipe); 5028 if (encoder) {
4950 WARN(!encoder_enabled, "encoder not enabled\n"); 5029 WARN(!encoder->connectors_active,
4951 if (WARN_ON(!encoder->base.crtc)) 5030 "encoder->connectors_active not set\n");
4952 return; 5031
5032 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
5033 WARN(!encoder_enabled, "encoder not enabled\n");
5034 if (WARN_ON(!encoder->base.crtc))
5035 return;
4953 5036
4954 crtc = encoder->base.crtc; 5037 crtc = encoder->base.crtc;
4955 5038
4956 WARN(!crtc->enabled, "crtc not enabled\n"); 5039 WARN(!crtc->enabled, "crtc not enabled\n");
4957 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); 5040 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
4958 WARN(pipe != to_intel_crtc(crtc)->pipe, 5041 WARN(pipe != to_intel_crtc(crtc)->pipe,
4959 "encoder active on the wrong pipe\n"); 5042 "encoder active on the wrong pipe\n");
5043 }
4960 } 5044 }
4961} 5045}
4962 5046
@@ -5161,9 +5245,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
5161 if (HAS_IPS(dev)) 5245 if (HAS_IPS(dev))
5162 hsw_compute_ips_config(crtc, pipe_config); 5246 hsw_compute_ips_config(crtc, pipe_config);
5163 5247
5164 /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old 5248 /*
5165 * clock survives for now. */ 5249 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
5166 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 5250 * old clock survives for now.
5251 */
5252 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
5167 pipe_config->shared_dpll = crtc->config.shared_dpll; 5253 pipe_config->shared_dpll = crtc->config.shared_dpll;
5168 5254
5169 if (pipe_config->has_pch_encoder) 5255 if (pipe_config->has_pch_encoder)
@@ -5174,7 +5260,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
5174 5260
5175static int valleyview_get_display_clock_speed(struct drm_device *dev) 5261static int valleyview_get_display_clock_speed(struct drm_device *dev)
5176{ 5262{
5177 return 400000; /* FIXME */ 5263 struct drm_i915_private *dev_priv = dev->dev_private;
5264 int vco = valleyview_get_vco(dev_priv);
5265 u32 val;
5266 int divider;
5267
5268 mutex_lock(&dev_priv->dpio_lock);
5269 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5270 mutex_unlock(&dev_priv->dpio_lock);
5271
5272 divider = val & DISPLAY_FREQUENCY_VALUES;
5273
5274 WARN((val & DISPLAY_FREQUENCY_STATUS) !=
5275 (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5276 "cdclk change in progress\n");
5277
5278 return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
5178} 5279}
5179 5280
5180static int i945_get_display_clock_speed(struct drm_device *dev) 5281static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -6125,8 +6226,8 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
6125 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, 6226 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6126 plane_config->tiled); 6227 plane_config->tiled);
6127 6228
6128 plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * 6229 plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
6129 aligned_height, PAGE_SIZE); 6230 aligned_height);
6130 6231
6131 DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 6232 DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6132 pipe, plane, crtc->base.primary->fb->width, 6233 pipe, plane, crtc->base.primary->fb->width,
@@ -7145,8 +7246,8 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
7145 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, 7246 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
7146 plane_config->tiled); 7247 plane_config->tiled);
7147 7248
7148 plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * 7249 plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
7149 aligned_height, PAGE_SIZE); 7250 aligned_height);
7150 7251
7151 DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 7252 DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7152 pipe, plane, crtc->base.primary->fb->width, 7253 pipe, plane, crtc->base.primary->fb->width,
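[ Ed. PAGE_ALIGN(x) is defined as ALIGN(x, PAGE_SIZE), so these
conversions are behaviour-neutral cleanups. A quick demonstration with
a 4K page and a simplified power-of-two ALIGN(): ]

#include <stdio.h>

#define PAGE_SIZE 4096
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)

int main(void)
{
	unsigned int size = 800 * 4 * 600; /* pitch * aligned_height */

	/* 1920000 rounds up to 1921024 either way */
	printf("%u %u\n", PAGE_ALIGN(size), ALIGN(size, PAGE_SIZE));
	return 0;
}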
@@ -7163,6 +7264,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7163 struct drm_i915_private *dev_priv = dev->dev_private; 7264 struct drm_i915_private *dev_priv = dev->dev_private;
7164 uint32_t tmp; 7265 uint32_t tmp;
7165 7266
7267 if (!intel_display_power_enabled(dev_priv,
7268 POWER_DOMAIN_PIPE(crtc->pipe)))
7269 return false;
7270
7166 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 7271 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7167 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 7272 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7168 7273
@@ -7237,7 +7342,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7237static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 7342static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7238{ 7343{
7239 struct drm_device *dev = dev_priv->dev; 7344 struct drm_device *dev = dev_priv->dev;
7240 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
7241 struct intel_crtc *crtc; 7345 struct intel_crtc *crtc;
7242 7346
7243 for_each_intel_crtc(dev, crtc) 7347 for_each_intel_crtc(dev, crtc)
@@ -7245,14 +7349,15 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7245 pipe_name(crtc->pipe)); 7349 pipe_name(crtc->pipe));
7246 7350
7247 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 7351 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
7248 WARN(plls->spll_refcount, "SPLL enabled\n"); 7352 WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
7249 WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n"); 7353 WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
7250 WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n"); 7354 WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
7251 WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); 7355 WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
7252 WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 7356 WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
7253 "CPU PWM1 enabled\n"); 7357 "CPU PWM1 enabled\n");
7254 WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 7358 if (IS_HASWELL(dev))
7255 "CPU PWM2 enabled\n"); 7359 WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
7360 "CPU PWM2 enabled\n");
7256 WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 7361 WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
7257 "PCH PWM1 enabled\n"); 7362 "PCH PWM1 enabled\n");
7258 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 7363 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
@@ -7265,7 +7370,17 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7265 * gen-specific and since we only disable LCPLL after we fully disable 7370 * gen-specific and since we only disable LCPLL after we fully disable
7266 * the interrupts, the check below should be enough. 7371 * the interrupts, the check below should be enough.
7267 */ 7372 */
7268 WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n"); 7373 WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
7374}
7375
7376static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
7377{
7378 struct drm_device *dev = dev_priv->dev;
7379
7380 if (IS_HASWELL(dev))
7381 return I915_READ(D_COMP_HSW);
7382 else
7383 return I915_READ(D_COMP_BDW);
7269} 7384}
7270 7385
7271static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 7386static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
@@ -7276,12 +7391,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7276 mutex_lock(&dev_priv->rps.hw_lock); 7391 mutex_lock(&dev_priv->rps.hw_lock);
7277 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, 7392 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7278 val)) 7393 val))
7279 DRM_ERROR("Failed to disable D_COMP\n"); 7394 DRM_ERROR("Failed to write to D_COMP\n");
7280 mutex_unlock(&dev_priv->rps.hw_lock); 7395 mutex_unlock(&dev_priv->rps.hw_lock);
7281 } else { 7396 } else {
7282 I915_WRITE(D_COMP, val); 7397 I915_WRITE(D_COMP_BDW, val);
7398 POSTING_READ(D_COMP_BDW);
7283 } 7399 }
7284 POSTING_READ(D_COMP);
7285} 7400}
7286 7401
7287/* 7402/*
@@ -7319,12 +7434,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
7319 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) 7434 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
7320 DRM_ERROR("LCPLL still locked\n"); 7435 DRM_ERROR("LCPLL still locked\n");
7321 7436
7322 val = I915_READ(D_COMP); 7437 val = hsw_read_dcomp(dev_priv);
7323 val |= D_COMP_COMP_DISABLE; 7438 val |= D_COMP_COMP_DISABLE;
7324 hsw_write_dcomp(dev_priv, val); 7439 hsw_write_dcomp(dev_priv, val);
7325 ndelay(100); 7440 ndelay(100);
7326 7441
7327 if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 7442 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
7443 1))
7328 DRM_ERROR("D_COMP RCOMP still in progress\n"); 7444 DRM_ERROR("D_COMP RCOMP still in progress\n");
7329 7445
7330 if (allow_power_down) { 7446 if (allow_power_down) {
@@ -7373,7 +7489,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7373 POSTING_READ(LCPLL_CTL); 7489 POSTING_READ(LCPLL_CTL);
7374 } 7490 }
7375 7491
7376 val = I915_READ(D_COMP); 7492 val = hsw_read_dcomp(dev_priv);
7377 val |= D_COMP_COMP_FORCE; 7493 val |= D_COMP_COMP_FORCE;
7378 val &= ~D_COMP_COMP_DISABLE; 7494 val &= ~D_COMP_COMP_DISABLE;
7379 hsw_write_dcomp(dev_priv, val); 7495 hsw_write_dcomp(dev_priv, val);
@@ -7479,13 +7595,59 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
7479 7595
7480 if (!intel_ddi_pll_select(intel_crtc)) 7596 if (!intel_ddi_pll_select(intel_crtc))
7481 return -EINVAL; 7597 return -EINVAL;
7482 intel_ddi_pll_enable(intel_crtc);
7483 7598
7484 intel_crtc->lowfreq_avail = false; 7599 intel_crtc->lowfreq_avail = false;
7485 7600
7486 return 0; 7601 return 0;
7487} 7602}
7488 7603
7604static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7605 struct intel_crtc_config *pipe_config)
7606{
7607 struct drm_device *dev = crtc->base.dev;
7608 struct drm_i915_private *dev_priv = dev->dev_private;
7609 struct intel_shared_dpll *pll;
7610 enum port port;
7611 uint32_t tmp;
7612
7613 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
7614
7615 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
7616
7617 pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
7618
7619 switch (pipe_config->ddi_pll_sel) {
7620 case PORT_CLK_SEL_WRPLL1:
7621 pipe_config->shared_dpll = DPLL_ID_WRPLL1;
7622 break;
7623 case PORT_CLK_SEL_WRPLL2:
7624 pipe_config->shared_dpll = DPLL_ID_WRPLL2;
7625 break;
7626 }
7627
7628 if (pipe_config->shared_dpll >= 0) {
7629 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7630
7631 WARN_ON(!pll->get_hw_state(dev_priv, pll,
7632 &pipe_config->dpll_hw_state));
7633 }
7634
7635 /*
7636 * Haswell has only FDI/PCH transcoder A. It is which is connected to
7637 * DDI E. So just check whether this pipe is wired to DDI E and whether
7638 * the PCH transcoder is on.
7639 */
7640 if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7641 pipe_config->has_pch_encoder = true;
7642
7643 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
7644 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7645 FDI_DP_PORT_WIDTH_SHIFT) + 1;
7646
7647 ironlake_get_fdi_m_n_config(crtc, pipe_config);
7648 }
7649}
7650
7489static bool haswell_get_pipe_config(struct intel_crtc *crtc, 7651static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7490 struct intel_crtc_config *pipe_config) 7652 struct intel_crtc_config *pipe_config)
7491{ 7653{
@@ -7531,22 +7693,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7531 if (!(tmp & PIPECONF_ENABLE)) 7693 if (!(tmp & PIPECONF_ENABLE))
7532 return false; 7694 return false;
7533 7695
7534 /* 7696 haswell_get_ddi_port_state(crtc, pipe_config);
7535 * Haswell has only FDI/PCH transcoder A, which is connected to
7536 * DDI E. So just check whether this pipe is wired to DDI E and whether
7537 * the PCH transcoder is on.
7538 */
7539 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
7540 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
7541 I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7542 pipe_config->has_pch_encoder = true;
7543
7544 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
7545 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7546 FDI_DP_PORT_WIDTH_SHIFT) + 1;
7547
7548 ironlake_get_fdi_m_n_config(crtc, pipe_config);
7549 }
7550 7697
7551 intel_get_pipe_timings(crtc, pipe_config); 7698 intel_get_pipe_timings(crtc, pipe_config);
7552 7699
@@ -7991,8 +8138,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
7991 struct drm_i915_private *dev_priv = dev->dev_private; 8138 struct drm_i915_private *dev_priv = dev->dev_private;
7992 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8139 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7993 int pipe = intel_crtc->pipe; 8140 int pipe = intel_crtc->pipe;
7994 int x = intel_crtc->cursor_x; 8141 int x = crtc->cursor_x;
7995 int y = intel_crtc->cursor_y; 8142 int y = crtc->cursor_y;
7996 u32 base = 0, pos = 0; 8143 u32 base = 0, pos = 0;
7997 8144
7998 if (on) 8145 if (on)
@@ -8036,21 +8183,27 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
8036 intel_crtc->cursor_base = base; 8183 intel_crtc->cursor_base = base;
8037} 8184}
8038 8185
8039static int intel_crtc_cursor_set(struct drm_crtc *crtc, 8186/*
8040 struct drm_file *file, 8187 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
8041 uint32_t handle, 8188 *
8042 uint32_t width, uint32_t height) 8189 * Note that the object's reference will be consumed if the update fails. If
8190 * the update succeeds, the reference of the old object (if any) will be
8191 * consumed.
8192 */
8193static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8194 struct drm_i915_gem_object *obj,
8195 uint32_t width, uint32_t height)
8043{ 8196{
8044 struct drm_device *dev = crtc->dev; 8197 struct drm_device *dev = crtc->dev;
8045 struct drm_i915_private *dev_priv = dev->dev_private; 8198 struct drm_i915_private *dev_priv = dev->dev_private;
8046 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8199 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8047 struct drm_i915_gem_object *obj; 8200 enum pipe pipe = intel_crtc->pipe;
8048 unsigned old_width; 8201 unsigned old_width;
8049 uint32_t addr; 8202 uint32_t addr;
8050 int ret; 8203 int ret;
8051 8204
8052 /* if we want to turn off the cursor ignore width and height */ 8205 /* if we want to turn off the cursor ignore width and height */
8053 if (!handle) { 8206 if (!obj) {
8054 DRM_DEBUG_KMS("cursor off\n"); 8207 DRM_DEBUG_KMS("cursor off\n");
8055 addr = 0; 8208 addr = 0;
8056 obj = NULL; 8209 obj = NULL;
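[ Ed. The handle lookup has moved out to the callers. A hedged sketch
of the resulting convention (the wrapper name is made up): the
reference taken by drm_gem_object_lookup() is handed straight to
intel_crtc_cursor_set_obj(), which per the comment above consumes it
on failure and consumes the old object's reference on success. ]

static int example_cursor_set(struct drm_crtc *crtc, struct drm_file *file,
			      uint32_t handle, uint32_t w, uint32_t h)
{
	struct drm_i915_gem_object *obj = NULL;

	if (handle) {
		obj = to_intel_bo(drm_gem_object_lookup(crtc->dev, file, handle));
		if (&obj->base == NULL)
			return -ENOENT;
	}

	/* the reference (if any) is now owned by the callee */
	return intel_crtc_cursor_set_obj(crtc, obj, w, h);
}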
@@ -8066,12 +8219,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
8066 return -EINVAL; 8219 return -EINVAL;
8067 } 8220 }
8068 8221
8069 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
8070 if (&obj->base == NULL)
8071 return -ENOENT;
8072
8073 if (obj->base.size < width * height * 4) { 8222 if (obj->base.size < width * height * 4) {
8074 DRM_DEBUG_KMS("buffer is to small\n"); 8223 DRM_DEBUG_KMS("buffer is too small\n");
8075 ret = -ENOMEM; 8224 ret = -ENOMEM;
8076 goto fail; 8225 goto fail;
8077 } 8226 }
@@ -8126,9 +8275,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
8126 if (intel_crtc->cursor_bo) { 8275 if (intel_crtc->cursor_bo) {
8127 if (!INTEL_INFO(dev)->cursor_needs_physical) 8276 if (!INTEL_INFO(dev)->cursor_needs_physical)
8128 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); 8277 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
8129 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
8130 } 8278 }
8131 8279
8280 i915_gem_track_fb(intel_crtc->cursor_bo, obj,
8281 INTEL_FRONTBUFFER_CURSOR(pipe));
8132 mutex_unlock(&dev->struct_mutex); 8282 mutex_unlock(&dev->struct_mutex);
8133 8283
8134 old_width = intel_crtc->cursor_width; 8284 old_width = intel_crtc->cursor_width;
@@ -8144,6 +8294,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
8144 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 8294 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
8145 } 8295 }
8146 8296
8297 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
8298
8147 return 0; 8299 return 0;
8148fail_unpin: 8300fail_unpin:
8149 i915_gem_object_unpin_from_display_plane(obj); 8301 i915_gem_object_unpin_from_display_plane(obj);
@@ -8154,19 +8306,6 @@ fail:
8154 return ret; 8306 return ret;
8155} 8307}
8156 8308
8157static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
8158{
8159 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8160
8161 intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
8162 intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
8163
8164 if (intel_crtc->active)
8165 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
8166
8167 return 0;
8168}
8169
8170static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 8309static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8171 u16 *blue, uint32_t start, uint32_t size) 8310 u16 *blue, uint32_t start, uint32_t size)
8172{ 8311{
@@ -8242,7 +8381,7 @@ static u32
8242intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 8381intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8243{ 8382{
8244 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 8383 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
8245 return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); 8384 return PAGE_ALIGN(pitch * mode->vdisplay);
8246} 8385}
8247 8386
8248static struct drm_framebuffer * 8387static struct drm_framebuffer *
@@ -8667,16 +8806,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8667 return mode; 8806 return mode;
8668} 8807}
8669 8808
8670static void intel_increase_pllclock(struct drm_crtc *crtc) 8809static void intel_increase_pllclock(struct drm_device *dev,
8810 enum pipe pipe)
8671{ 8811{
8672 struct drm_device *dev = crtc->dev;
8673 struct drm_i915_private *dev_priv = dev->dev_private; 8812 struct drm_i915_private *dev_priv = dev->dev_private;
8674 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8675 int pipe = intel_crtc->pipe;
8676 int dpll_reg = DPLL(pipe); 8813 int dpll_reg = DPLL(pipe);
8677 int dpll; 8814 int dpll;
8678 8815
8679 if (HAS_PCH_SPLIT(dev)) 8816 if (!HAS_GMCH_DISPLAY(dev))
8680 return; 8817 return;
8681 8818
8682 if (!dev_priv->lvds_downclock_avail) 8819 if (!dev_priv->lvds_downclock_avail)
@@ -8704,7 +8841,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
8704 struct drm_i915_private *dev_priv = dev->dev_private; 8841 struct drm_i915_private *dev_priv = dev->dev_private;
8705 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8842 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8706 8843
8707 if (HAS_PCH_SPLIT(dev)) 8844 if (!HAS_GMCH_DISPLAY(dev))
8708 return; 8845 return;
8709 8846
8710 if (!dev_priv->lvds_downclock_avail) 8847 if (!dev_priv->lvds_downclock_avail)
@@ -8773,28 +8910,179 @@ out:
8773 intel_runtime_pm_put(dev_priv); 8910 intel_runtime_pm_put(dev_priv);
8774} 8911}
8775 8912
8776void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 8913
8777 struct intel_engine_cs *ring) 8914/**
8915 * intel_mark_fb_busy - mark given planes as busy
8916 * @dev: DRM device
8917 * @frontbuffer_bits: bits for the affected planes
8918 * @ring: optional ring for asynchronous commands
8919 *
8920 * This function gets called every time the screen contents change. It can be
8921 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
8922 */
8923static void intel_mark_fb_busy(struct drm_device *dev,
8924 unsigned frontbuffer_bits,
8925 struct intel_engine_cs *ring)
8778{ 8926{
8779 struct drm_device *dev = obj->base.dev; 8927 enum pipe pipe;
8780 struct drm_crtc *crtc;
8781 8928
8782 if (!i915.powersave) 8929 if (!i915.powersave)
8783 return; 8930 return;
8784 8931
8785 for_each_crtc(dev, crtc) { 8932 for_each_pipe(pipe) {
8786 if (!crtc->primary->fb) 8933 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
8787 continue;
8788
8789 if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
8790 continue; 8934 continue;
8791 8935
8792 intel_increase_pllclock(crtc); 8936 intel_increase_pllclock(dev, pipe);
8793 if (ring && intel_fbc_enabled(dev)) 8937 if (ring && intel_fbc_enabled(dev))
8794 ring->fbc_dirty = true; 8938 ring->fbc_dirty = true;
8795 } 8939 }
8796} 8940}
8797 8941
8942/**
8943 * intel_fb_obj_invalidate - invalidate frontbuffer object
8944 * @obj: GEM object to invalidate
8945 * @ring: set for asynchronous rendering
8946 *
8947 * This function gets called every time rendering on the given object starts and
8948 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
8949 * be invalidated. If @ring is non-NULL any subsequent flush will be delayed
8950 * until the rendering completes or a flip on this frontbuffer plane is
8951 * scheduled.
8952 */
8953void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
8954 struct intel_engine_cs *ring)
8955{
8956 struct drm_device *dev = obj->base.dev;
8957 struct drm_i915_private *dev_priv = dev->dev_private;
8958
8959 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8960
8961 if (!obj->frontbuffer_bits)
8962 return;
8963
8964 if (ring) {
8965 mutex_lock(&dev_priv->fb_tracking.lock);
8966 dev_priv->fb_tracking.busy_bits
8967 |= obj->frontbuffer_bits;
8968 dev_priv->fb_tracking.flip_bits
8969 &= ~obj->frontbuffer_bits;
8970 mutex_unlock(&dev_priv->fb_tracking.lock);
8971 }
8972
8973 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
8974
8975 intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
8976}
8977
8978/**
8979 * intel_frontbuffer_flush - flush frontbuffer
8980 * @dev: DRM device
8981 * @frontbuffer_bits: frontbuffer plane tracking bits
8982 *
8983 * This function gets called every time rendering on the given planes has
8984 * completed and frontbuffer caching can be started again. Flushes will get
8985 * delayed if they're blocked by some outstanding asynchronous rendering.
8986 *
8987 * Can be called without any locks held.
8988 */
8989void intel_frontbuffer_flush(struct drm_device *dev,
8990 unsigned frontbuffer_bits)
8991{
8992 struct drm_i915_private *dev_priv = dev->dev_private;
8993
8994 /* Delay flushing when rings are still busy. */
8995 mutex_lock(&dev_priv->fb_tracking.lock);
8996 frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
8997 mutex_unlock(&dev_priv->fb_tracking.lock);
8998
8999 intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
9000
9001 intel_edp_psr_flush(dev, frontbuffer_bits);
9002}
9003
9004/**
9005 * intel_fb_obj_flush - flush frontbuffer object
9006 * @obj: GEM object to flush
9007 * @retire: set when retiring asynchronous rendering
9008 *
9009 * This function gets called every time rendering on the given object has
9010 * completed and frontbuffer caching can be started again. If @retire is true
9011 * then any delayed flushes will be unblocked.
9012 */
9013void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
9014 bool retire)
9015{
9016 struct drm_device *dev = obj->base.dev;
9017 struct drm_i915_private *dev_priv = dev->dev_private;
9018 unsigned frontbuffer_bits;
9019
9020 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
9021
9022 if (!obj->frontbuffer_bits)
9023 return;
9024
9025 frontbuffer_bits = obj->frontbuffer_bits;
9026
9027 if (retire) {
9028 mutex_lock(&dev_priv->fb_tracking.lock);
9029 /* Filter out new bits since rendering started. */
9030 frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
9031
9032 dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
9033 mutex_unlock(&dev_priv->fb_tracking.lock);
9034 }
9035
9036 intel_frontbuffer_flush(dev, frontbuffer_bits);
9037}
9038
9039/**
9040 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
9041 * @dev: DRM device
9042 * @frontbuffer_bits: frontbuffer plane tracking bits
9043 *
9044 * This function gets called after scheduling a flip on the given planes. The actual
9045 * frontbuffer flushing will be delayed until completion is signalled with
9046 * intel_frontbuffer_flip_complete. If an invalidate happens in between, this
9047 * flush will be cancelled.
9048 *
9049 * Can be called without any locks held.
9050 */
9051void intel_frontbuffer_flip_prepare(struct drm_device *dev,
9052 unsigned frontbuffer_bits)
9053{
9054 struct drm_i915_private *dev_priv = dev->dev_private;
9055
9056 mutex_lock(&dev_priv->fb_tracking.lock);
9057 dev_priv->fb_tracking.flip_bits
9058 |= frontbuffer_bits;
9059 mutex_unlock(&dev_priv->fb_tracking.lock);
9060}
9061
9062/**
9063 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
9064 * @dev: DRM device
9065 * @frontbuffer_bits: frontbuffer plane tracking bits
9066 *
9067 * This function gets called after the flip has been latched and will complete
9068 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
9069 *
9070 * Can be called without any locks held.
9071 */
9072void intel_frontbuffer_flip_complete(struct drm_device *dev,
9073 unsigned frontbuffer_bits)
9074{
9075 struct drm_i915_private *dev_priv = dev->dev_private;
9076
9077 mutex_lock(&dev_priv->fb_tracking.lock);
9078 /* Mask any cancelled flips. */
9079 frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
9080 dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
9081 mutex_unlock(&dev_priv->fb_tracking.lock);
9082
9083 intel_frontbuffer_flush(dev, frontbuffer_bits);
9084}
9085
8798static void intel_crtc_destroy(struct drm_crtc *crtc) 9086static void intel_crtc_destroy(struct drm_crtc *crtc)
8799{ 9087{
8800 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9088 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -8812,8 +9100,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
8812 kfree(work); 9100 kfree(work);
8813 } 9101 }
8814 9102
8815 intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
8816
8817 drm_crtc_cleanup(crtc); 9103 drm_crtc_cleanup(crtc);
8818 9104
8819 kfree(intel_crtc); 9105 kfree(intel_crtc);
@@ -8824,6 +9110,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
8824 struct intel_unpin_work *work = 9110 struct intel_unpin_work *work =
8825 container_of(__work, struct intel_unpin_work, work); 9111 container_of(__work, struct intel_unpin_work, work);
8826 struct drm_device *dev = work->crtc->dev; 9112 struct drm_device *dev = work->crtc->dev;
9113 enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
8827 9114
8828 mutex_lock(&dev->struct_mutex); 9115 mutex_lock(&dev->struct_mutex);
8829 intel_unpin_fb_obj(work->old_fb_obj); 9116 intel_unpin_fb_obj(work->old_fb_obj);
@@ -8833,6 +9120,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
8833 intel_update_fbc(dev); 9120 intel_update_fbc(dev);
8834 mutex_unlock(&dev->struct_mutex); 9121 mutex_unlock(&dev->struct_mutex);
8835 9122
9123 intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9124
8836 BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); 9125 BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
8837 atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); 9126 atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
8838 9127
@@ -9202,6 +9491,150 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
9202 return 0; 9491 return 0;
9203} 9492}
9204 9493
9494static bool use_mmio_flip(struct intel_engine_cs *ring,
9495 struct drm_i915_gem_object *obj)
9496{
9497 /*
9498 * This is not being used for older platforms, because
9499 * non-availability of flip done interrupt forces us to use
9500 * CS flips. Older platforms derive flip done using some clever
9501 * tricks involving the flip_pending status bits and vblank irqs.
9502 * So using MMIO flips there would disrupt this mechanism.
9503 */
9504
9505 if (ring == NULL)
9506 return true;
9507
9508 if (INTEL_INFO(ring->dev)->gen < 5)
9509 return false;
9510
9511 if (i915.use_mmio_flip < 0)
9512 return false;
9513 else if (i915.use_mmio_flip > 0)
9514 return true;
9515 else
9516 return ring != obj->ring;
9517}
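[ Ed. The checks above resolve strictly in order; as a quick reference,
a reading of the function rather than a new policy: ]

/*
 *   ring == NULL            -> MMIO flip (nothing to wait on)
 *   gen < 5                 -> CS flip  (flip done derived from CS tricks)
 *   i915.use_mmio_flip < 0  -> CS flip  (forced off by module param)
 *   i915.use_mmio_flip > 0  -> MMIO flip (forced on)
 *   i915.use_mmio_flip == 0 -> MMIO flip only when obj->ring != ring
 */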
9518
9519static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
9520{
9521 struct drm_device *dev = intel_crtc->base.dev;
9522 struct drm_i915_private *dev_priv = dev->dev_private;
9523 struct intel_framebuffer *intel_fb =
9524 to_intel_framebuffer(intel_crtc->base.primary->fb);
9525 struct drm_i915_gem_object *obj = intel_fb->obj;
9526 u32 dspcntr;
9527 u32 reg;
9528
9529 intel_mark_page_flip_active(intel_crtc);
9530
9531 reg = DSPCNTR(intel_crtc->plane);
9532 dspcntr = I915_READ(reg);
9533
9534 if (INTEL_INFO(dev)->gen >= 4) {
9535 if (obj->tiling_mode != I915_TILING_NONE)
9536 dspcntr |= DISPPLANE_TILED;
9537 else
9538 dspcntr &= ~DISPPLANE_TILED;
9539 }
9540 I915_WRITE(reg, dspcntr);
9541
9542 I915_WRITE(DSPSURF(intel_crtc->plane),
9543 intel_crtc->unpin_work->gtt_offset);
9544 POSTING_READ(DSPSURF(intel_crtc->plane));
9545}
9546
9547static int intel_postpone_flip(struct drm_i915_gem_object *obj)
9548{
9549 struct intel_engine_cs *ring;
9550 int ret;
9551
9552 lockdep_assert_held(&obj->base.dev->struct_mutex);
9553
9554 if (!obj->last_write_seqno)
9555 return 0;
9556
9557 ring = obj->ring;
9558
9559 if (i915_seqno_passed(ring->get_seqno(ring, true),
9560 obj->last_write_seqno))
9561 return 0;
9562
9563 ret = i915_gem_check_olr(ring, obj->last_write_seqno);
9564 if (ret)
9565 return ret;
9566
9567 if (WARN_ON(!ring->irq_get(ring)))
9568 return 0;
9569
9570 return 1;
9571}
9572
9573void intel_notify_mmio_flip(struct intel_engine_cs *ring)
9574{
9575 struct drm_i915_private *dev_priv = to_i915(ring->dev);
9576 struct intel_crtc *intel_crtc;
9577 unsigned long irq_flags;
9578 u32 seqno;
9579
9580 seqno = ring->get_seqno(ring, false);
9581
9582 spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
9583 for_each_intel_crtc(ring->dev, intel_crtc) {
9584 struct intel_mmio_flip *mmio_flip;
9585
9586 mmio_flip = &intel_crtc->mmio_flip;
9587 if (mmio_flip->seqno == 0)
9588 continue;
9589
9590 if (ring->id != mmio_flip->ring_id)
9591 continue;
9592
9593 if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
9594 intel_do_mmio_flip(intel_crtc);
9595 mmio_flip->seqno = 0;
9596 ring->irq_put(ring);
9597 }
9598 }
9599 spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
9600}
9601
9602static int intel_queue_mmio_flip(struct drm_device *dev,
9603 struct drm_crtc *crtc,
9604 struct drm_framebuffer *fb,
9605 struct drm_i915_gem_object *obj,
9606 struct intel_engine_cs *ring,
9607 uint32_t flags)
9608{
9609 struct drm_i915_private *dev_priv = dev->dev_private;
9610 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9611 unsigned long irq_flags;
9612 int ret;
9613
9614 if (WARN_ON(intel_crtc->mmio_flip.seqno))
9615 return -EBUSY;
9616
9617 ret = intel_postpone_flip(obj);
9618 if (ret < 0)
9619 return ret;
9620 if (ret == 0) {
9621 intel_do_mmio_flip(intel_crtc);
9622 return 0;
9623 }
9624
9625 spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
9626 intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
9627 intel_crtc->mmio_flip.ring_id = obj->ring->id;
9628 spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
9629
9630 /*
9631 * Double check to catch cases where irq fired before
9632 * mmio flip data was ready
9633 */
9634 intel_notify_mmio_flip(obj->ring);
9635 return 0;
9636}
9637
9205static int intel_default_queue_flip(struct drm_device *dev, 9638static int intel_default_queue_flip(struct drm_device *dev,
9206 struct drm_crtc *crtc, 9639 struct drm_crtc *crtc,
9207 struct drm_framebuffer *fb, 9640 struct drm_framebuffer *fb,
@@ -9220,13 +9653,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9220 struct drm_device *dev = crtc->dev; 9653 struct drm_device *dev = crtc->dev;
9221 struct drm_i915_private *dev_priv = dev->dev_private; 9654 struct drm_i915_private *dev_priv = dev->dev_private;
9222 struct drm_framebuffer *old_fb = crtc->primary->fb; 9655 struct drm_framebuffer *old_fb = crtc->primary->fb;
9223 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; 9656 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9224 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9657 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9658 enum pipe pipe = intel_crtc->pipe;
9225 struct intel_unpin_work *work; 9659 struct intel_unpin_work *work;
9226 struct intel_engine_cs *ring; 9660 struct intel_engine_cs *ring;
9227 unsigned long flags; 9661 unsigned long flags;
9228 int ret; 9662 int ret;
9229 9663
9664 /*
9665 * drm_mode_page_flip_ioctl() should already catch this, but double
9666 * check to be safe. In the future we may enable pageflipping from
9667 * a disabled primary plane.
9668 */
9669 if (WARN_ON(intel_fb_obj(old_fb) == NULL))
9670 return -EBUSY;
9671
9230 /* Can't change pixel format via MI display flips. */ 9672 /* Can't change pixel format via MI display flips. */
9231 if (fb->pixel_format != crtc->primary->fb->pixel_format) 9673 if (fb->pixel_format != crtc->primary->fb->pixel_format)
9232 return -EINVAL; 9674 return -EINVAL;
@@ -9249,7 +9691,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9249 9691
9250 work->event = event; 9692 work->event = event;
9251 work->crtc = crtc; 9693 work->crtc = crtc;
9252 work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; 9694 work->old_fb_obj = intel_fb_obj(old_fb);
9253 INIT_WORK(&work->work, intel_unpin_work_fn); 9695 INIT_WORK(&work->work, intel_unpin_work_fn);
9254 9696
9255 ret = drm_crtc_vblank_get(crtc); 9697 ret = drm_crtc_vblank_get(crtc);
@@ -9290,10 +9732,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9290 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 9732 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
9291 9733
9292 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 9734 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9293 work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1; 9735 work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
9294 9736
9295 if (IS_VALLEYVIEW(dev)) { 9737 if (IS_VALLEYVIEW(dev)) {
9296 ring = &dev_priv->ring[BCS]; 9738 ring = &dev_priv->ring[BCS];
9739 if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
9740 /* vlv: DISPLAY_FLIP fails to change tiling */
9741 ring = NULL;
9742 } else if (IS_IVYBRIDGE(dev)) {
9743 ring = &dev_priv->ring[BCS];
9297 } else if (INTEL_INFO(dev)->gen >= 7) { 9744 } else if (INTEL_INFO(dev)->gen >= 7) {
9298 ring = obj->ring; 9745 ring = obj->ring;
9299 if (ring == NULL || ring->id != RCS) 9746 if (ring == NULL || ring->id != RCS)
@@ -9309,12 +9756,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9309 work->gtt_offset = 9756 work->gtt_offset =
9310 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset; 9757 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9311 9758
9312 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags); 9759 if (use_mmio_flip(ring, obj))
9760 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
9761 page_flip_flags);
9762 else
9763 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9764 page_flip_flags);
9313 if (ret) 9765 if (ret)
9314 goto cleanup_unpin; 9766 goto cleanup_unpin;
9315 9767
9768 i915_gem_track_fb(work->old_fb_obj, obj,
9769 INTEL_FRONTBUFFER_PRIMARY(pipe));
9770
9316 intel_disable_fbc(dev); 9771 intel_disable_fbc(dev);
9317 intel_mark_fb_busy(obj, NULL); 9772 intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9318 mutex_unlock(&dev->struct_mutex); 9773 mutex_unlock(&dev->struct_mutex);
9319 9774
9320 trace_i915_flip_request(intel_crtc->plane, obj); 9775 trace_i915_flip_request(intel_crtc->plane, obj);
@@ -9344,7 +9799,7 @@ out_hang:
9344 intel_crtc_wait_for_pending_flips(crtc); 9799 intel_crtc_wait_for_pending_flips(crtc);
9345 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); 9800 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9346 if (ret == 0 && event) 9801 if (ret == 0 && event)
9347 drm_send_vblank_event(dev, intel_crtc->pipe, event); 9802 drm_send_vblank_event(dev, pipe, event);
9348 } 9803 }
9349 return ret; 9804 return ret;
9350} 9805}
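[ Ed. intel_crtc_page_flip() is what ultimately services drmModePageFlip() from user space. A sketch of the request/event pairing with libdrm, assuming fd, crtc_id and fb_id came from the usual drmModeGetResources()/drmModeAddFB() setup: ]

#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void page_flip_handler(int fd, unsigned int frame,
			      unsigned int sec, unsigned int usec,
			      void *data)
{
	(void)fd; (void)data;
	printf("flip completed on vblank %u at %u.%06u\n", frame, sec, usec);
}

/* Queue one flip and block until the vblank event confirms it. */
static int flip_once(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = page_flip_handler,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ret;

	ret = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret)
		return ret;       /* e.g. -EBUSY while a flip is pending */

	while (poll(&pfd, 1, -1) <= 0)
		;
	return drmHandleEvent(fd, &evctx);
}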
@@ -10017,11 +10472,14 @@ intel_pipe_config_compare(struct drm_device *dev,
10017 10472
10018 PIPE_CONF_CHECK_I(double_wide); 10473 PIPE_CONF_CHECK_I(double_wide);
10019 10474
10475 PIPE_CONF_CHECK_X(ddi_pll_sel);
10476
10020 PIPE_CONF_CHECK_I(shared_dpll); 10477 PIPE_CONF_CHECK_I(shared_dpll);
10021 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 10478 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10022 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 10479 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10023 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 10480 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10024 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 10481 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10482 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
10025 10483
10026 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 10484 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10027 PIPE_CONF_CHECK_I(pipe_bpp); 10485 PIPE_CONF_CHECK_I(pipe_bpp);
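[ Ed. the PIPE_CONF_CHECK_I/_X lines above are, as far as I can tell, token-pasting macros that diff one field of the computed pipe config against the state read back from hardware, _I printing decimal and _X hex. A toy standalone version of the shape: ]

#include <stdbool.h>
#include <stdio.h>

struct pipe_config { int pipe_bpp; unsigned ddi_pll_sel; };

#define PIPE_CONF_CHECK_I(name)                                        \
	if (cur->name != new->name) {                                  \
		fprintf(stderr, "mismatch in " #name                   \
			" (expected %i, found %i)\n",                  \
			cur->name, new->name);                         \
		return false;                                          \
	}

#define PIPE_CONF_CHECK_X(name)                                        \
	if (cur->name != new->name) {                                  \
		fprintf(stderr, "mismatch in " #name                   \
			" (expected 0x%08x, found 0x%08x)\n",          \
			cur->name, new->name);                         \
		return false;                                          \
	}

static bool pipe_config_compare(const struct pipe_config *cur,
				const struct pipe_config *new)
{
	PIPE_CONF_CHECK_I(pipe_bpp);
	PIPE_CONF_CHECK_X(ddi_pll_sel);
	return true;
}

int main(void)
{
	struct pipe_config sw = { 24, 0x1 }, hw = { 24, 0x2 };
	return pipe_config_compare(&sw, &hw) ? 0 : 1;
}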
@@ -10083,6 +10541,14 @@ check_encoder_state(struct drm_device *dev)
10083 if (connector->base.dpms != DRM_MODE_DPMS_OFF) 10541 if (connector->base.dpms != DRM_MODE_DPMS_OFF)
10084 active = true; 10542 active = true;
10085 } 10543 }
10544 /*
10545 * For MST connectors, if we unplug, the connector goes
10546 * away but the encoder is still connected to a crtc
10547 * until a modeset happens in response to the hotplug.
10548 */
10549 if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
10550 continue;
10551
10086 WARN(!!encoder->base.crtc != enabled, 10552 WARN(!!encoder->base.crtc != enabled,
10087 "encoder's enabled state mismatch " 10553 "encoder's enabled state mismatch "
10088 "(expected %i, found %i)\n", 10554 "(expected %i, found %i)\n",
@@ -10378,20 +10844,23 @@ static int __intel_set_mode(struct drm_crtc *crtc,
10378 * on the DPLL. 10844 * on the DPLL.
10379 */ 10845 */
10380 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 10846 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
10381 struct drm_framebuffer *old_fb; 10847 struct drm_framebuffer *old_fb = crtc->primary->fb;
10848 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
10849 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10382 10850
10383 mutex_lock(&dev->struct_mutex); 10851 mutex_lock(&dev->struct_mutex);
10384 ret = intel_pin_and_fence_fb_obj(dev, 10852 ret = intel_pin_and_fence_fb_obj(dev,
10385 to_intel_framebuffer(fb)->obj, 10853 obj,
10386 NULL); 10854 NULL);
10387 if (ret != 0) { 10855 if (ret != 0) {
10388 DRM_ERROR("pin & fence failed\n"); 10856 DRM_ERROR("pin & fence failed\n");
10389 mutex_unlock(&dev->struct_mutex); 10857 mutex_unlock(&dev->struct_mutex);
10390 goto done; 10858 goto done;
10391 } 10859 }
10392 old_fb = crtc->primary->fb;
10393 if (old_fb) 10860 if (old_fb)
10394 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); 10861 intel_unpin_fb_obj(old_obj);
10862 i915_gem_track_fb(old_obj, obj,
10863 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
10395 mutex_unlock(&dev->struct_mutex); 10864 mutex_unlock(&dev->struct_mutex);
10396 10865
10397 crtc->primary->fb = fb; 10866 crtc->primary->fb = fb;
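[ Ed. i915_gem_track_fb(), called here and in the page-flip path above, is the frontbuffer bookkeeping that PSR and FBC consume; functionally it moves per-plane bits from the outgoing scanout object to the incoming one. A minimal sketch under that assumption: ]

#include <assert.h>

struct gem_obj { unsigned frontbuffer_bits; };

/* Hand the given frontbuffer bits from the old scanout object to the
 * new one; either side may be NULL (plane being enabled or disabled). */
static void track_fb(struct gem_obj *old, struct gem_obj *new,
		     unsigned frontbuffer_bits)
{
	if (old) {
		assert(old->frontbuffer_bits & frontbuffer_bits);
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}
	if (new)
		new->frontbuffer_bits |= frontbuffer_bits;
}

int main(void)
{
	struct gem_obj a = { 0x1 }, b = { 0 };
	track_fb(&a, &b, 0x1);  /* primary plane of pipe A flips a -> b */
	assert(a.frontbuffer_bits == 0 && b.frontbuffer_bits == 0x1);
	return 0;
}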
@@ -10563,12 +11032,17 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
10563 if (is_crtc_connector_off(set)) { 11032 if (is_crtc_connector_off(set)) {
10564 config->mode_changed = true; 11033 config->mode_changed = true;
10565 } else if (set->crtc->primary->fb != set->fb) { 11034 } else if (set->crtc->primary->fb != set->fb) {
10566 /* If we have no fb then treat it as a full mode set */ 11035 /*
11036 * If we have no fb, we can only flip as long as the crtc is
11037 * active, otherwise we need a full mode set. The crtc may
11038 * be active if we've only disabled the primary plane, or
11039 * in fastboot situations.
11040 */
10567 if (set->crtc->primary->fb == NULL) { 11041 if (set->crtc->primary->fb == NULL) {
10568 struct intel_crtc *intel_crtc = 11042 struct intel_crtc *intel_crtc =
10569 to_intel_crtc(set->crtc); 11043 to_intel_crtc(set->crtc);
10570 11044
10571 if (intel_crtc->active && i915.fastboot) { 11045 if (intel_crtc->active) {
10572 DRM_DEBUG_KMS("crtc has no fb, will flip\n"); 11046 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
10573 config->fb_changed = true; 11047 config->fb_changed = true;
10574 } else { 11048 } else {
@@ -10620,7 +11094,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
10620 * for them. */ 11094 * for them. */
10621 for (ro = 0; ro < set->num_connectors; ro++) { 11095 for (ro = 0; ro < set->num_connectors; ro++) {
10622 if (set->connectors[ro] == &connector->base) { 11096 if (set->connectors[ro] == &connector->base) {
10623 connector->new_encoder = connector->encoder; 11097 connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
10624 break; 11098 break;
10625 } 11099 }
10626 } 11100 }
@@ -10666,7 +11140,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
10666 new_crtc)) { 11140 new_crtc)) {
10667 return -EINVAL; 11141 return -EINVAL;
10668 } 11142 }
10669 connector->encoder->new_crtc = to_intel_crtc(new_crtc); 11143 connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
10670 11144
10671 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", 11145 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
10672 connector->base.base.id, 11146 connector->base.base.id,
@@ -10700,7 +11174,12 @@ intel_modeset_stage_output_state(struct drm_device *dev,
10700 } 11174 }
10701 } 11175 }
10702 /* Now we've also updated encoder->new_crtc for all encoders. */ 11176 /* Now we've also updated encoder->new_crtc for all encoders. */
10703 11177 list_for_each_entry(connector, &dev->mode_config.connector_list,
11178 base.head) {
11179 if (connector->new_encoder)
11180 if (connector->new_encoder != connector->encoder)
11181 connector->encoder = connector->new_encoder;
11182 }
10704 for_each_intel_crtc(dev, crtc) { 11183 for_each_intel_crtc(dev, crtc) {
10705 crtc->new_enabled = false; 11184 crtc->new_enabled = false;
10706 11185
@@ -10806,10 +11285,24 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
10806 ret = intel_set_mode(set->crtc, set->mode, 11285 ret = intel_set_mode(set->crtc, set->mode,
10807 set->x, set->y, set->fb); 11286 set->x, set->y, set->fb);
10808 } else if (config->fb_changed) { 11287 } else if (config->fb_changed) {
11288 struct drm_i915_private *dev_priv = dev->dev_private;
11289 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
11290
10809 intel_crtc_wait_for_pending_flips(set->crtc); 11291 intel_crtc_wait_for_pending_flips(set->crtc);
10810 11292
10811 ret = intel_pipe_set_base(set->crtc, 11293 ret = intel_pipe_set_base(set->crtc,
10812 set->x, set->y, set->fb); 11294 set->x, set->y, set->fb);
11295
11296 /*
11297 * We need to make sure the primary plane is re-enabled if it
11298 * has previously been turned off.
11299 */
11300 if (!intel_crtc->primary_enabled && ret == 0) {
11301 WARN_ON(!intel_crtc->active);
11302 intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
11303 intel_crtc->pipe);
11304 }
11305
10813 /* 11306 /*
10814 * In the fastboot case this may be our only check of the 11307 * In the fastboot case this may be our only check of the
10815 * state after boot. It would be better to only do it on 11308 * state after boot. It would be better to only do it on
@@ -10850,26 +11343,21 @@ out_config:
10850} 11343}
10851 11344
10852static const struct drm_crtc_funcs intel_crtc_funcs = { 11345static const struct drm_crtc_funcs intel_crtc_funcs = {
10853 .cursor_set = intel_crtc_cursor_set,
10854 .cursor_move = intel_crtc_cursor_move,
10855 .gamma_set = intel_crtc_gamma_set, 11346 .gamma_set = intel_crtc_gamma_set,
10856 .set_config = intel_crtc_set_config, 11347 .set_config = intel_crtc_set_config,
10857 .destroy = intel_crtc_destroy, 11348 .destroy = intel_crtc_destroy,
10858 .page_flip = intel_crtc_page_flip, 11349 .page_flip = intel_crtc_page_flip,
10859}; 11350};
10860 11351
10861static void intel_cpu_pll_init(struct drm_device *dev)
10862{
10863 if (HAS_DDI(dev))
10864 intel_ddi_pll_init(dev);
10865}
10866
10867static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, 11352static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
10868 struct intel_shared_dpll *pll, 11353 struct intel_shared_dpll *pll,
10869 struct intel_dpll_hw_state *hw_state) 11354 struct intel_dpll_hw_state *hw_state)
10870{ 11355{
10871 uint32_t val; 11356 uint32_t val;
10872 11357
11358 if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
11359 return false;
11360
10873 val = I915_READ(PCH_DPLL(pll->id)); 11361 val = I915_READ(PCH_DPLL(pll->id));
10874 hw_state->dpll = val; 11362 hw_state->dpll = val;
10875 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 11363 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
@@ -10951,7 +11439,9 @@ static void intel_shared_dpll_init(struct drm_device *dev)
10951{ 11439{
10952 struct drm_i915_private *dev_priv = dev->dev_private; 11440 struct drm_i915_private *dev_priv = dev->dev_private;
10953 11441
10954 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 11442 if (HAS_DDI(dev))
11443 intel_ddi_pll_init(dev);
11444 else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
10955 ibx_pch_dpll_init(dev); 11445 ibx_pch_dpll_init(dev);
10956 else 11446 else
10957 dev_priv->num_shared_dpll = 0; 11447 dev_priv->num_shared_dpll = 0;
@@ -10959,17 +11449,328 @@ static void intel_shared_dpll_init(struct drm_device *dev)
10959 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); 11449 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
10960} 11450}
10961 11451
11452static int
11453intel_primary_plane_disable(struct drm_plane *plane)
11454{
11455 struct drm_device *dev = plane->dev;
11456 struct drm_i915_private *dev_priv = dev->dev_private;
11457 struct intel_plane *intel_plane = to_intel_plane(plane);
11458 struct intel_crtc *intel_crtc;
11459
11460 if (!plane->fb)
11461 return 0;
11462
11463 BUG_ON(!plane->crtc);
11464
11465 intel_crtc = to_intel_crtc(plane->crtc);
11466
11467 /*
11468 * Even though we checked plane->fb above, it's still possible that
11469 * the primary plane has been implicitly disabled because the crtc
11470 * coordinates given weren't visible, or because we detected
11471 * that it was 100% covered by a sprite plane. Or, the CRTC may be
11472 * off and we've set a fb, but haven't actually turned on the CRTC yet.
11473 * In any of these cases, we need to unpin the FB and let the fb pointer get
11474 * updated, but otherwise we don't need to touch the hardware.
11475 */
11476 if (!intel_crtc->primary_enabled)
11477 goto disable_unpin;
11478
11479 intel_crtc_wait_for_pending_flips(plane->crtc);
11480 intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
11481 intel_plane->pipe);
11482disable_unpin:
11483 mutex_lock(&dev->struct_mutex);
11484 i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
11485 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11486 intel_unpin_fb_obj(intel_fb_obj(plane->fb));
11487 mutex_unlock(&dev->struct_mutex);
11488 plane->fb = NULL;
11489
11490 return 0;
11491}
11492
11493static int
11494intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11495 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11496 unsigned int crtc_w, unsigned int crtc_h,
11497 uint32_t src_x, uint32_t src_y,
11498 uint32_t src_w, uint32_t src_h)
11499{
11500 struct drm_device *dev = crtc->dev;
11501 struct drm_i915_private *dev_priv = dev->dev_private;
11502 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11503 struct intel_plane *intel_plane = to_intel_plane(plane);
11504 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11505 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11506 struct drm_rect dest = {
11507 /* integer pixels */
11508 .x1 = crtc_x,
11509 .y1 = crtc_y,
11510 .x2 = crtc_x + crtc_w,
11511 .y2 = crtc_y + crtc_h,
11512 };
11513 struct drm_rect src = {
11514 /* 16.16 fixed point */
11515 .x1 = src_x,
11516 .y1 = src_y,
11517 .x2 = src_x + src_w,
11518 .y2 = src_y + src_h,
11519 };
11520 const struct drm_rect clip = {
11521 /* integer pixels */
11522 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
11523 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
11524 };
11525 bool visible;
11526 int ret;
11527
11528 ret = drm_plane_helper_check_update(plane, crtc, fb,
11529 &src, &dest, &clip,
11530 DRM_PLANE_HELPER_NO_SCALING,
11531 DRM_PLANE_HELPER_NO_SCALING,
11532 false, true, &visible);
11533
11534 if (ret)
11535 return ret;
11536
11537 /*
11538 * If the CRTC isn't enabled, we're just pinning the framebuffer,
11539 * updating the fb pointer, and returning without touching the
11540 * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
11541 * turn on the display with all planes set up as desired.
11542 */
11543 if (!crtc->enabled) {
11544 mutex_lock(&dev->struct_mutex);
11545
11546 /*
11547 * If we already called setplane while the crtc was disabled,
11548 * we may have an fb pinned; unpin it.
11549 */
11550 if (plane->fb)
11551 intel_unpin_fb_obj(old_obj);
11552
11553 i915_gem_track_fb(old_obj, obj,
11554 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11555
11556 /* Pin and return without programming hardware */
11557 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11558 mutex_unlock(&dev->struct_mutex);
11559
11560 return ret;
11561 }
11562
11563 intel_crtc_wait_for_pending_flips(crtc);
11564
11565 /*
11566 * If clipping results in a non-visible primary plane, we'll disable
11567 * the primary plane. Note that this is a bit different from what
11568 * happens if userspace explicitly disables the plane by passing fb=0
11569 * because plane->fb still gets set and pinned.
11570 */
11571 if (!visible) {
11572 mutex_lock(&dev->struct_mutex);
11573
11574 /*
11575 * Try to pin the new fb first so that we can bail out if we
11576 * fail.
11577 */
11578 if (plane->fb != fb) {
11579 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11580 if (ret) {
11581 mutex_unlock(&dev->struct_mutex);
11582 return ret;
11583 }
11584 }
11585
11586 i915_gem_track_fb(old_obj, obj,
11587 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11588
11589 if (intel_crtc->primary_enabled)
11590 intel_disable_primary_hw_plane(dev_priv,
11591 intel_plane->plane,
11592 intel_plane->pipe);
11593
11594
11595 if (plane->fb != fb)
11596 if (plane->fb)
11597 intel_unpin_fb_obj(old_obj);
11598
11599 mutex_unlock(&dev->struct_mutex);
11600
11601 return 0;
11602 }
11603
11604 ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
11605 if (ret)
11606 return ret;
11607
11608 if (!intel_crtc->primary_enabled)
11609 intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
11610 intel_crtc->pipe);
11611
11612 return 0;
11613}
11614
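[ Ed. drm_plane_helper_check_update() above boils down to rectangle intersection: the destination is clipped against the pipe source size and the plane counts as not visible when nothing remains. A standalone sketch of that core: ]

#include <assert.h>
#include <stdbool.h>

struct rect { int x1, y1, x2, y2; };

/* Clip dst against clip in place; return false when the result is
 * empty, i.e. the plane would not be visible. */
static bool clip_rect(struct rect *dst, const struct rect *clip)
{
	if (dst->x1 < clip->x1) dst->x1 = clip->x1;
	if (dst->y1 < clip->y1) dst->y1 = clip->y1;
	if (dst->x2 > clip->x2) dst->x2 = clip->x2;
	if (dst->y2 > clip->y2) dst->y2 = clip->y2;
	return dst->x1 < dst->x2 && dst->y1 < dst->y2;
}

int main(void)
{
	struct rect pipe = { 0, 0, 1920, 1080 };
	struct rect on   = { 100, 100, 200, 200 };
	struct rect off  = { 2000, 0, 2100, 100 }; /* wholly off screen */
	assert(clip_rect(&on, &pipe));
	assert(!clip_rect(&off, &pipe));
	return 0;
}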
11615/* Common destruction function for both primary and cursor planes */
11616static void intel_plane_destroy(struct drm_plane *plane)
11617{
11618 struct intel_plane *intel_plane = to_intel_plane(plane);
11619 drm_plane_cleanup(plane);
11620 kfree(intel_plane);
11621}
11622
11623static const struct drm_plane_funcs intel_primary_plane_funcs = {
11624 .update_plane = intel_primary_plane_setplane,
11625 .disable_plane = intel_primary_plane_disable,
11626 .destroy = intel_plane_destroy,
11627};
11628
11629static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11630 int pipe)
11631{
11632 struct intel_plane *primary;
11633 const uint32_t *intel_primary_formats;
11634 int num_formats;
11635
11636 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
11637 if (primary == NULL)
11638 return NULL;
11639
11640 primary->can_scale = false;
11641 primary->max_downscale = 1;
11642 primary->pipe = pipe;
11643 primary->plane = pipe;
11644 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
11645 primary->plane = !pipe;
11646
11647 if (INTEL_INFO(dev)->gen <= 3) {
11648 intel_primary_formats = intel_primary_formats_gen2;
11649 num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
11650 } else {
11651 intel_primary_formats = intel_primary_formats_gen4;
11652 num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
11653 }
11654
11655 drm_universal_plane_init(dev, &primary->base, 0,
11656 &intel_primary_plane_funcs,
11657 intel_primary_formats, num_formats,
11658 DRM_PLANE_TYPE_PRIMARY);
11659 return &primary->base;
11660}
11661
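[ Ed. with drm_universal_plane_init() the primary becomes an ordinary plane object, but user space only sees primaries and cursors after opting in. A libdrm sketch of that opt-in, assuming fd is an open DRM device: ]

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* List every plane; primary and cursor planes stay hidden unless the
 * universal-planes client cap is enabled first. */
static void list_planes(int fd)
{
	drmModePlaneResPtr res;
	uint32_t i;

	if (drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1))
		return;

	res = drmModeGetPlaneResources(fd);
	if (!res)
		return;

	for (i = 0; i < res->count_planes; i++)
		printf("plane %u\n", res->planes[i]);

	drmModeFreePlaneResources(res);
}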
11662static int
11663intel_cursor_plane_disable(struct drm_plane *plane)
11664{
11665 if (!plane->fb)
11666 return 0;
11667
11668 BUG_ON(!plane->crtc);
11669
11670 return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
11671}
11672
11673static int
11674intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
11675 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11676 unsigned int crtc_w, unsigned int crtc_h,
11677 uint32_t src_x, uint32_t src_y,
11678 uint32_t src_w, uint32_t src_h)
11679{
11680 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11681 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11682 struct drm_i915_gem_object *obj = intel_fb->obj;
11683 struct drm_rect dest = {
11684 /* integer pixels */
11685 .x1 = crtc_x,
11686 .y1 = crtc_y,
11687 .x2 = crtc_x + crtc_w,
11688 .y2 = crtc_y + crtc_h,
11689 };
11690 struct drm_rect src = {
11691 /* 16.16 fixed point */
11692 .x1 = src_x,
11693 .y1 = src_y,
11694 .x2 = src_x + src_w,
11695 .y2 = src_y + src_h,
11696 };
11697 const struct drm_rect clip = {
11698 /* integer pixels */
11699 .x2 = intel_crtc->config.pipe_src_w,
11700 .y2 = intel_crtc->config.pipe_src_h,
11701 };
11702 bool visible;
11703 int ret;
11704
11705 ret = drm_plane_helper_check_update(plane, crtc, fb,
11706 &src, &dest, &clip,
11707 DRM_PLANE_HELPER_NO_SCALING,
11708 DRM_PLANE_HELPER_NO_SCALING,
11709 true, true, &visible);
11710 if (ret)
11711 return ret;
11712
11713 crtc->cursor_x = crtc_x;
11714 crtc->cursor_y = crtc_y;
11715 if (fb != crtc->cursor->fb) {
11716 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
11717 } else {
11718 intel_crtc_update_cursor(crtc, visible);
11719 return 0;
11720 }
11721}
11722static const struct drm_plane_funcs intel_cursor_plane_funcs = {
11723 .update_plane = intel_cursor_plane_update,
11724 .disable_plane = intel_cursor_plane_disable,
11725 .destroy = intel_plane_destroy,
11726};
11727
11728static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
11729 int pipe)
11730{
11731 struct intel_plane *cursor;
11732
11733 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
11734 if (cursor == NULL)
11735 return NULL;
11736
11737 cursor->can_scale = false;
11738 cursor->max_downscale = 1;
11739 cursor->pipe = pipe;
11740 cursor->plane = pipe;
11741
11742 drm_universal_plane_init(dev, &cursor->base, 0,
11743 &intel_cursor_plane_funcs,
11744 intel_cursor_formats,
11745 ARRAY_SIZE(intel_cursor_formats),
11746 DRM_PLANE_TYPE_CURSOR);
11747 return &cursor->base;
11748}
11749
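[ Ed. both update_plane entry points above take their source rectangle in 16.16 fixed point, per the DRM setplane ABI; the conversion is a 16-bit shift. For illustration: ]

#include <assert.h>
#include <stdint.h>

/* DRM setplane source coordinates are 16.16 fixed point. */
static uint32_t to_16_16(uint32_t pixels)  { return pixels << 16; }
static uint32_t from_16_16(uint32_t fixed) { return fixed >> 16; }

int main(void)
{
	assert(to_16_16(1920) == 0x07800000);
	assert(from_16_16(to_16_16(1080)) == 1080);
	return 0;
}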
10962static void intel_crtc_init(struct drm_device *dev, int pipe) 11750static void intel_crtc_init(struct drm_device *dev, int pipe)
10963{ 11751{
10964 struct drm_i915_private *dev_priv = dev->dev_private; 11752 struct drm_i915_private *dev_priv = dev->dev_private;
10965 struct intel_crtc *intel_crtc; 11753 struct intel_crtc *intel_crtc;
10966 int i; 11754 struct drm_plane *primary = NULL;
11755 struct drm_plane *cursor = NULL;
11756 int i, ret;
10967 11757
10968 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 11758 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
10969 if (intel_crtc == NULL) 11759 if (intel_crtc == NULL)
10970 return; 11760 return;
10971 11761
10972 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); 11762 primary = intel_primary_plane_create(dev, pipe);
11763 if (!primary)
11764 goto fail;
11765
11766 cursor = intel_cursor_plane_create(dev, pipe);
11767 if (!cursor)
11768 goto fail;
11769
11770 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
11771 cursor, &intel_crtc_funcs);
11772 if (ret)
11773 goto fail;
10973 11774
10974 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 11775 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
10975 for (i = 0; i < 256; i++) { 11776 for (i = 0; i < 256; i++) {
@@ -10980,7 +11781,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
10980 11781
10981 /* 11782 /*
10982 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port 11783 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
10983 * is hooked to plane B. Hence we want plane A feeding pipe B. 11784 * is hooked to pipe B. Hence we want plane A feeding pipe B.
10984 */ 11785 */
10985 intel_crtc->pipe = pipe; 11786 intel_crtc->pipe = pipe;
10986 intel_crtc->plane = pipe; 11787 intel_crtc->plane = pipe;
@@ -11002,6 +11803,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
11002 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 11803 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
11003 11804
11004 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 11805 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
11806 return;
11807
11808fail:
11809 if (primary)
11810 drm_plane_cleanup(primary);
11811 if (cursor)
11812 drm_plane_cleanup(cursor);
11813 kfree(intel_crtc);
11005} 11814}
11006 11815
11007enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) 11816enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -11021,21 +11830,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
11021 struct drm_file *file) 11830 struct drm_file *file)
11022{ 11831{
11023 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 11832 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11024 struct drm_mode_object *drmmode_obj; 11833 struct drm_crtc *drmmode_crtc;
11025 struct intel_crtc *crtc; 11834 struct intel_crtc *crtc;
11026 11835
11027 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 11836 if (!drm_core_check_feature(dev, DRIVER_MODESET))
11028 return -ENODEV; 11837 return -ENODEV;
11029 11838
11030 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, 11839 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
11031 DRM_MODE_OBJECT_CRTC);
11032 11840
11033 if (!drmmode_obj) { 11841 if (!drmmode_crtc) {
11034 DRM_ERROR("no such CRTC id\n"); 11842 DRM_ERROR("no such CRTC id\n");
11035 return -ENOENT; 11843 return -ENOENT;
11036 } 11844 }
11037 11845
11038 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); 11846 crtc = to_intel_crtc(drmmode_crtc);
11039 pipe_from_crtc_id->pipe = crtc->pipe; 11847 pipe_from_crtc_id->pipe = crtc->pipe;
11040 11848
11041 return 0; 11849 return 0;
@@ -11236,6 +12044,8 @@ static void intel_setup_outputs(struct drm_device *dev)
11236 if (SUPPORTS_TV(dev)) 12044 if (SUPPORTS_TV(dev))
11237 intel_tv_init(dev); 12045 intel_tv_init(dev);
11238 12046
12047 intel_edp_psr_init(dev);
12048
11239 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 12049 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
11240 encoder->base.possible_crtcs = encoder->crtc_mask; 12050 encoder->base.possible_crtcs = encoder->crtc_mask;
11241 encoder->base.possible_clones = 12051 encoder->base.possible_clones =
@@ -11249,11 +12059,14 @@ static void intel_setup_outputs(struct drm_device *dev)
11249 12059
11250static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 12060static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11251{ 12061{
12062 struct drm_device *dev = fb->dev;
11252 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 12063 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11253 12064
11254 drm_framebuffer_cleanup(fb); 12065 drm_framebuffer_cleanup(fb);
12066 mutex_lock(&dev->struct_mutex);
11255 WARN_ON(!intel_fb->obj->framebuffer_references--); 12067 WARN_ON(!intel_fb->obj->framebuffer_references--);
11256 drm_gem_object_unreference_unlocked(&intel_fb->obj->base); 12068 drm_gem_object_unreference(&intel_fb->obj->base);
12069 mutex_unlock(&dev->struct_mutex);
11257 kfree(intel_fb); 12070 kfree(intel_fb);
11258} 12071}
11259 12072
@@ -11438,7 +12251,7 @@ static void intel_init_display(struct drm_device *dev)
11438 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 12251 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
11439 dev_priv->display.crtc_enable = haswell_crtc_enable; 12252 dev_priv->display.crtc_enable = haswell_crtc_enable;
11440 dev_priv->display.crtc_disable = haswell_crtc_disable; 12253 dev_priv->display.crtc_disable = haswell_crtc_disable;
11441 dev_priv->display.off = haswell_crtc_off; 12254 dev_priv->display.off = ironlake_crtc_off;
11442 dev_priv->display.update_primary_plane = 12255 dev_priv->display.update_primary_plane =
11443 ironlake_update_primary_plane; 12256 ironlake_update_primary_plane;
11444 } else if (HAS_PCH_SPLIT(dev)) { 12257 } else if (HAS_PCH_SPLIT(dev)) {
@@ -11722,6 +12535,9 @@ void intel_modeset_init_hw(struct drm_device *dev)
11722{ 12535{
11723 intel_prepare_ddi(dev); 12536 intel_prepare_ddi(dev);
11724 12537
12538 if (IS_VALLEYVIEW(dev))
12539 vlv_update_cdclk(dev);
12540
11725 intel_init_clock_gating(dev); 12541 intel_init_clock_gating(dev);
11726 12542
11727 intel_reset_dpio(dev); 12543 intel_reset_dpio(dev);
@@ -11798,7 +12614,6 @@ void intel_modeset_init(struct drm_device *dev)
11798 intel_init_dpio(dev); 12614 intel_init_dpio(dev);
11799 intel_reset_dpio(dev); 12615 intel_reset_dpio(dev);
11800 12616
11801 intel_cpu_pll_init(dev);
11802 intel_shared_dpll_init(dev); 12617 intel_shared_dpll_init(dev);
11803 12618
11804 /* Just disable it once at startup */ 12619 /* Just disable it once at startup */
@@ -12024,6 +12839,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
12024 encoder->base.base.id, 12839 encoder->base.base.id,
12025 encoder->base.name); 12840 encoder->base.name);
12026 encoder->disable(encoder); 12841 encoder->disable(encoder);
12842 if (encoder->post_disable)
12843 encoder->post_disable(encoder);
12027 } 12844 }
12028 encoder->base.crtc = NULL; 12845 encoder->base.crtc = NULL;
12029 encoder->connectors_active = false; 12846 encoder->connectors_active = false;
@@ -12108,10 +12925,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
12108 crtc->active ? "enabled" : "disabled"); 12925 crtc->active ? "enabled" : "disabled");
12109 } 12926 }
12110 12927
12111 /* FIXME: Smash this into the new shared dpll infrastructure. */
12112 if (HAS_DDI(dev))
12113 intel_ddi_setup_hw_pll_state(dev);
12114
12115 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 12928 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12116 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 12929 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12117 12930
@@ -12125,6 +12938,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
12125 12938
12126 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", 12939 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
12127 pll->name, pll->refcount, pll->on); 12940 pll->name, pll->refcount, pll->on);
12941
12942 if (pll->refcount)
12943 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
12128 } 12944 }
12129 12945
12130 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 12946 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -12242,7 +13058,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
12242void intel_modeset_gem_init(struct drm_device *dev) 13058void intel_modeset_gem_init(struct drm_device *dev)
12243{ 13059{
12244 struct drm_crtc *c; 13060 struct drm_crtc *c;
12245 struct intel_framebuffer *fb; 13061 struct drm_i915_gem_object *obj;
12246 13062
12247 mutex_lock(&dev->struct_mutex); 13063 mutex_lock(&dev->struct_mutex);
12248 intel_init_gt_powersave(dev); 13064 intel_init_gt_powersave(dev);
@@ -12259,11 +13075,11 @@ void intel_modeset_gem_init(struct drm_device *dev)
12259 */ 13075 */
12260 mutex_lock(&dev->struct_mutex); 13076 mutex_lock(&dev->struct_mutex);
12261 for_each_crtc(dev, c) { 13077 for_each_crtc(dev, c) {
12262 if (!c->primary->fb) 13078 obj = intel_fb_obj(c->primary->fb);
13079 if (obj == NULL)
12263 continue; 13080 continue;
12264 13081
12265 fb = to_intel_framebuffer(c->primary->fb); 13082 if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
12266 if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
12267 DRM_ERROR("failed to pin boot fb on pipe %d\n", 13083 DRM_ERROR("failed to pin boot fb on pipe %d\n",
12268 to_intel_crtc(c)->pipe); 13084 to_intel_crtc(c)->pipe);
12269 drm_framebuffer_unreference(c->primary->fb); 13085 drm_framebuffer_unreference(c->primary->fb);
@@ -12278,13 +13094,12 @@ void intel_connector_unregister(struct intel_connector *intel_connector)
12278 struct drm_connector *connector = &intel_connector->base; 13094 struct drm_connector *connector = &intel_connector->base;
12279 13095
12280 intel_panel_destroy_backlight(connector); 13096 intel_panel_destroy_backlight(connector);
12281 drm_sysfs_connector_remove(connector); 13097 drm_connector_unregister(connector);
12282} 13098}
12283 13099
12284void intel_modeset_cleanup(struct drm_device *dev) 13100void intel_modeset_cleanup(struct drm_device *dev)
12285{ 13101{
12286 struct drm_i915_private *dev_priv = dev->dev_private; 13102 struct drm_i915_private *dev_priv = dev->dev_private;
12287 struct drm_crtc *crtc;
12288 struct drm_connector *connector; 13103 struct drm_connector *connector;
12289 13104
12290 /* 13105 /*
@@ -12294,6 +13109,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
12294 */ 13109 */
12295 drm_irq_uninstall(dev); 13110 drm_irq_uninstall(dev);
12296 cancel_work_sync(&dev_priv->hotplug_work); 13111 cancel_work_sync(&dev_priv->hotplug_work);
13112 dev_priv->pm._irqs_disabled = true;
13113
12297 /* 13114 /*
12298 * Due to the hpd irq storm handling the hotplug work can re-arm the 13115 * Due to the hpd irq storm handling the hotplug work can re-arm the
12299 * poll handlers. Hence disable polling after hpd handling is shut down. 13116 * poll handlers. Hence disable polling after hpd handling is shut down.
@@ -12304,14 +13121,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
12304 13121
12305 intel_unregister_dsm_handler(); 13122 intel_unregister_dsm_handler();
12306 13123
12307 for_each_crtc(dev, crtc) {
12308 /* Skip inactive CRTCs */
12309 if (!crtc->primary->fb)
12310 continue;
12311
12312 intel_increase_pllclock(crtc);
12313 }
12314
12315 intel_disable_fbc(dev); 13124 intel_disable_fbc(dev);
12316 13125
12317 intel_disable_gt_powersave(dev); 13126 intel_disable_gt_powersave(dev);
@@ -12479,7 +13288,7 @@ intel_display_capture_error_state(struct drm_device *dev)
12479 13288
12480 error->pipe[i].source = I915_READ(PIPESRC(i)); 13289 error->pipe[i].source = I915_READ(PIPESRC(i));
12481 13290
12482 if (!HAS_PCH_SPLIT(dev)) 13291 if (HAS_GMCH_DISPLAY(dev))
12483 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 13292 error->pipe[i].stat = I915_READ(PIPESTAT(i));
12484 } 13293 }
12485 13294
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8a1a4fbc06ac..eb52ecfe14cf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -114,7 +114,7 @@ static void intel_dp_link_down(struct intel_dp *intel_dp);
114static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); 114static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
115static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 115static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
116 116
117static int 117int
118intel_dp_max_link_bw(struct intel_dp *intel_dp) 118intel_dp_max_link_bw(struct intel_dp *intel_dp)
119{ 119{
120 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 120 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -773,12 +773,29 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
773{ 773{
774 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); 774 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
775 775
776 sysfs_remove_link(&intel_connector->base.kdev->kobj, 776 if (!intel_connector->mst_port)
777 intel_dp->aux.ddc.dev.kobj.name); 777 sysfs_remove_link(&intel_connector->base.kdev->kobj,
778 intel_dp->aux.ddc.dev.kobj.name);
778 intel_connector_unregister(intel_connector); 779 intel_connector_unregister(intel_connector);
779} 780}
780 781
781static void 782static void
783hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
784{
785 switch (link_bw) {
786 case DP_LINK_BW_1_62:
787 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
788 break;
789 case DP_LINK_BW_2_7:
790 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
791 break;
792 case DP_LINK_BW_5_4:
793 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
794 break;
795 }
796}
797
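[ Ed. the DP_LINK_BW_* codes are the per-lane link symbol clock in units of 27 MHz (0x06 -> 162 MHz for 1.62 Gbps, 0x0a -> 270 MHz for 2.7 Gbps, 0x14 -> 540 MHz for 5.4 Gbps), which is why each case above selects the matching LCPLL frequency. The mapping in one helper, mirroring what I believe drm_dp_bw_code_to_link_rate() does: ]

#include <assert.h>
#include <stdint.h>

/* Return the link symbol clock in kHz; the per-lane bit rate is 10x
 * this thanks to 8b/10b encoding. */
static int bw_code_to_link_rate_khz(uint8_t bw_code)
{
	return bw_code * 27000;
}

int main(void)
{
	assert(bw_code_to_link_rate_khz(0x06) == 162000); /* 1.62 Gbps */
	assert(bw_code_to_link_rate_khz(0x0a) == 270000); /* 2.7  Gbps */
	assert(bw_code_to_link_rate_khz(0x14) == 540000); /* 5.4  Gbps */
	return 0;
}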
798static void
782intel_dp_set_clock(struct intel_encoder *encoder, 799intel_dp_set_clock(struct intel_encoder *encoder,
783 struct intel_crtc_config *pipe_config, int link_bw) 800 struct intel_crtc_config *pipe_config, int link_bw)
784{ 801{
@@ -789,8 +806,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
789 if (IS_G4X(dev)) { 806 if (IS_G4X(dev)) {
790 divisor = gen4_dpll; 807 divisor = gen4_dpll;
791 count = ARRAY_SIZE(gen4_dpll); 808 count = ARRAY_SIZE(gen4_dpll);
792 } else if (IS_HASWELL(dev)) {
793 /* Haswell has special-purpose DP DDI clocks. */
794 } else if (HAS_PCH_SPLIT(dev)) { 809 } else if (HAS_PCH_SPLIT(dev)) {
795 divisor = pch_dpll; 810 divisor = pch_dpll;
796 count = ARRAY_SIZE(pch_dpll); 811 count = ARRAY_SIZE(pch_dpll);
@@ -961,7 +976,10 @@ found:
961 &pipe_config->dp_m2_n2); 976 &pipe_config->dp_m2_n2);
962 } 977 }
963 978
964 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 979 if (HAS_DDI(dev))
980 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
981 else
982 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
965 983
966 return true; 984 return true;
967} 985}
@@ -1349,8 +1367,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
1349 1367
1350 DRM_DEBUG_KMS("Turn eDP power off\n"); 1368 DRM_DEBUG_KMS("Turn eDP power off\n");
1351 1369
1352 edp_wait_backlight_off(intel_dp);
1353
1354 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1370 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1355 1371
1356 pp = ironlake_get_pp_control(intel_dp); 1372 pp = ironlake_get_pp_control(intel_dp);
@@ -1386,6 +1402,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1386 return; 1402 return;
1387 1403
1388 DRM_DEBUG_KMS("\n"); 1404 DRM_DEBUG_KMS("\n");
1405
1406 intel_panel_enable_backlight(intel_dp->attached_connector);
1407
1389 /* 1408 /*
1390 * If we enable the backlight right away following a panel power 1409 * If we enable the backlight right away following a panel power
1391 * on, we may see slight flicker as the panel syncs with the eDP 1410 * on, we may see slight flicker as the panel syncs with the eDP
@@ -1400,8 +1419,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1400 1419
1401 I915_WRITE(pp_ctrl_reg, pp); 1420 I915_WRITE(pp_ctrl_reg, pp);
1402 POSTING_READ(pp_ctrl_reg); 1421 POSTING_READ(pp_ctrl_reg);
1403
1404 intel_panel_enable_backlight(intel_dp->attached_connector);
1405} 1422}
1406 1423
1407void intel_edp_backlight_off(struct intel_dp *intel_dp) 1424void intel_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1414,8 +1431,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
1414 if (!is_edp(intel_dp)) 1431 if (!is_edp(intel_dp))
1415 return; 1432 return;
1416 1433
1417 intel_panel_disable_backlight(intel_dp->attached_connector);
1418
1419 DRM_DEBUG_KMS("\n"); 1434 DRM_DEBUG_KMS("\n");
1420 pp = ironlake_get_pp_control(intel_dp); 1435 pp = ironlake_get_pp_control(intel_dp);
1421 pp &= ~EDP_BLC_ENABLE; 1436 pp &= ~EDP_BLC_ENABLE;
@@ -1425,6 +1440,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
1425 I915_WRITE(pp_ctrl_reg, pp); 1440 I915_WRITE(pp_ctrl_reg, pp);
1426 POSTING_READ(pp_ctrl_reg); 1441 POSTING_READ(pp_ctrl_reg);
1427 intel_dp->last_backlight_off = jiffies; 1442 intel_dp->last_backlight_off = jiffies;
1443
1444 edp_wait_backlight_off(intel_dp);
1445
1446 intel_panel_disable_backlight(intel_dp->attached_connector);
1428} 1447}
1429 1448
1430static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1449static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1646,11 +1665,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1646 } 1665 }
1647} 1666}
1648 1667
1649static bool is_edp_psr(struct drm_device *dev) 1668static bool is_edp_psr(struct intel_dp *intel_dp)
1650{ 1669{
1651 struct drm_i915_private *dev_priv = dev->dev_private; 1670 return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
1652
1653 return dev_priv->psr.sink_support;
1654} 1671}
1655 1672
1656static bool intel_edp_is_psr_enabled(struct drm_device *dev) 1673static bool intel_edp_is_psr_enabled(struct drm_device *dev)
@@ -1698,9 +1715,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1698 struct drm_i915_private *dev_priv = dev->dev_private; 1715 struct drm_i915_private *dev_priv = dev->dev_private;
1699 struct edp_vsc_psr psr_vsc; 1716 struct edp_vsc_psr psr_vsc;
1700 1717
1701 if (intel_dp->psr_setup_done)
1702 return;
1703
1704 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ 1718 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
1705 memset(&psr_vsc, 0, sizeof(psr_vsc)); 1719 memset(&psr_vsc, 0, sizeof(psr_vsc));
1706 psr_vsc.sdp_header.HB0 = 0; 1720 psr_vsc.sdp_header.HB0 = 0;
@@ -1712,22 +1726,25 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1712 /* Avoid continuous PSR exit by masking memup and hpd */ 1726 /* Avoid continuous PSR exit by masking memup and hpd */
1713 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | 1727 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1714 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); 1728 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1715
1716 intel_dp->psr_setup_done = true;
1717} 1729}
1718 1730
1719static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) 1731static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1720{ 1732{
1721 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1733 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1734 struct drm_device *dev = dig_port->base.base.dev;
1722 struct drm_i915_private *dev_priv = dev->dev_private; 1735 struct drm_i915_private *dev_priv = dev->dev_private;
1723 uint32_t aux_clock_divider; 1736 uint32_t aux_clock_divider;
1724 int precharge = 0x3; 1737 int precharge = 0x3;
1725 int msg_size = 5; /* Header(4) + Message(1) */ 1738 int msg_size = 5; /* Header(4) + Message(1) */
1739 bool only_standby = false;
1726 1740
1727 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); 1741 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
1728 1742
1743 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
1744 only_standby = true;
1745
1729 /* Enable PSR in sink */ 1746 /* Enable PSR in sink */
1730 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) 1747 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
1731 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 1748 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
1732 DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); 1749 DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
1733 else 1750 else
@@ -1746,18 +1763,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1746 1763
1747static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) 1764static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1748{ 1765{
1749 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1766 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1767 struct drm_device *dev = dig_port->base.base.dev;
1750 struct drm_i915_private *dev_priv = dev->dev_private; 1768 struct drm_i915_private *dev_priv = dev->dev_private;
1751 uint32_t max_sleep_time = 0x1f; 1769 uint32_t max_sleep_time = 0x1f;
1752 uint32_t idle_frames = 1; 1770 uint32_t idle_frames = 1;
1753 uint32_t val = 0x0; 1771 uint32_t val = 0x0;
1754 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; 1772 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
1773 bool only_standby = false;
1755 1774
1756 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { 1775 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
1776 only_standby = true;
1777
1778 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
1757 val |= EDP_PSR_LINK_STANDBY; 1779 val |= EDP_PSR_LINK_STANDBY;
1758 val |= EDP_PSR_TP2_TP3_TIME_0us; 1780 val |= EDP_PSR_TP2_TP3_TIME_0us;
1759 val |= EDP_PSR_TP1_TIME_0us; 1781 val |= EDP_PSR_TP1_TIME_0us;
1760 val |= EDP_PSR_SKIP_AUX_EXIT; 1782 val |= EDP_PSR_SKIP_AUX_EXIT;
1783 val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
1761 } else 1784 } else
1762 val |= EDP_PSR_LINK_DISABLE; 1785 val |= EDP_PSR_LINK_DISABLE;
1763 1786
@@ -1775,18 +1798,15 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1775 struct drm_i915_private *dev_priv = dev->dev_private; 1798 struct drm_i915_private *dev_priv = dev->dev_private;
1776 struct drm_crtc *crtc = dig_port->base.base.crtc; 1799 struct drm_crtc *crtc = dig_port->base.base.crtc;
1777 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1800 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1778 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1779 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1780 1801
1781 dev_priv->psr.source_ok = false; 1802 lockdep_assert_held(&dev_priv->psr.lock);
1803 lockdep_assert_held(&dev->struct_mutex);
1804 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1805 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
1782 1806
1783 if (!HAS_PSR(dev)) { 1807 dev_priv->psr.source_ok = false;
1784 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1785 return false;
1786 }
1787 1808
1788 if ((intel_encoder->type != INTEL_OUTPUT_EDP) || 1809 if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
1789 (dig_port->port != PORT_A)) {
1790 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); 1810 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1791 return false; 1811 return false;
1792 } 1812 }
@@ -1796,29 +1816,9 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1796 return false; 1816 return false;
1797 } 1817 }
1798 1818
1799 crtc = dig_port->base.base.crtc; 1819 /* Below limitations aren't valid for Broadwell */
1800 if (crtc == NULL) { 1820 if (IS_BROADWELL(dev))
1801 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1821 goto out;
1802 return false;
1803 }
1804
1805 intel_crtc = to_intel_crtc(crtc);
1806 if (!intel_crtc_active(crtc)) {
1807 DRM_DEBUG_KMS("crtc not active for PSR\n");
1808 return false;
1809 }
1810
1811 obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1812 if (obj->tiling_mode != I915_TILING_X ||
1813 obj->fence_reg == I915_FENCE_REG_NONE) {
1814 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1815 return false;
1816 }
1817
1818 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1819 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1820 return false;
1821 }
1822 1822
1823 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & 1823 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1824 S3D_ENABLE) { 1824 S3D_ENABLE) {
@@ -1831,35 +1831,60 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1831 return false; 1831 return false;
1832 } 1832 }
1833 1833
1834 out:
1834 dev_priv->psr.source_ok = true; 1835 dev_priv->psr.source_ok = true;
1835 return true; 1836 return true;
1836} 1837}
1837 1838
1838static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) 1839static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
1839{ 1840{
1840 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1841 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1841 1842 struct drm_device *dev = intel_dig_port->base.base.dev;
1842 if (!intel_edp_psr_match_conditions(intel_dp) || 1843 struct drm_i915_private *dev_priv = dev->dev_private;
1843 intel_edp_is_psr_enabled(dev))
1844 return;
1845 1844
1846 /* Setup PSR once */ 1845 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
1847 intel_edp_psr_setup(intel_dp); 1846 WARN_ON(dev_priv->psr.active);
1847 lockdep_assert_held(&dev_priv->psr.lock);
1848 1848
1849 /* Enable PSR on the panel */ 1849 /* Enable PSR on the panel */
1850 intel_edp_psr_enable_sink(intel_dp); 1850 intel_edp_psr_enable_sink(intel_dp);
1851 1851
1852 /* Enable PSR on the host */ 1852 /* Enable PSR on the host */
1853 intel_edp_psr_enable_source(intel_dp); 1853 intel_edp_psr_enable_source(intel_dp);
1854
1855 dev_priv->psr.active = true;
1854} 1856}
1855 1857
1856void intel_edp_psr_enable(struct intel_dp *intel_dp) 1858void intel_edp_psr_enable(struct intel_dp *intel_dp)
1857{ 1859{
1858 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1860 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1861 struct drm_i915_private *dev_priv = dev->dev_private;
1862
1863 if (!HAS_PSR(dev)) {
1864 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1865 return;
1866 }
1867
1868 if (!is_edp_psr(intel_dp)) {
1869 DRM_DEBUG_KMS("PSR not supported by this panel\n");
1870 return;
1871 }
1859 1872
1860 if (intel_edp_psr_match_conditions(intel_dp) && 1873 mutex_lock(&dev_priv->psr.lock);
1861 !intel_edp_is_psr_enabled(dev)) 1874 if (dev_priv->psr.enabled) {
1862 intel_edp_psr_do_enable(intel_dp); 1875 DRM_DEBUG_KMS("PSR already in use\n");
1876 mutex_unlock(&dev_priv->psr.lock);
1877 return;
1878 }
1879
1880 dev_priv->psr.busy_frontbuffer_bits = 0;
1881
1882 /* Setup PSR once */
1883 intel_edp_psr_setup(intel_dp);
1884
1885 if (intel_edp_psr_match_conditions(intel_dp))
1886 dev_priv->psr.enabled = intel_dp;
1887 mutex_unlock(&dev_priv->psr.lock);
1863} 1888}
1864 1889
1865void intel_edp_psr_disable(struct intel_dp *intel_dp) 1890void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1867,36 +1892,136 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
1867 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1892 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1868 struct drm_i915_private *dev_priv = dev->dev_private; 1893 struct drm_i915_private *dev_priv = dev->dev_private;
1869 1894
1870 if (!intel_edp_is_psr_enabled(dev)) 1895 mutex_lock(&dev_priv->psr.lock);
1896 if (!dev_priv->psr.enabled) {
1897 mutex_unlock(&dev_priv->psr.lock);
1871 return; 1898 return;
1899 }
1900
1901 if (dev_priv->psr.active) {
1902 I915_WRITE(EDP_PSR_CTL(dev),
1903 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
1904
1905 /* Wait till PSR is idle */
1906 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
1907 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1908 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1872 1909
1873 I915_WRITE(EDP_PSR_CTL(dev), 1910 dev_priv->psr.active = false;
1874 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); 1911 } else {
1912 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
1913 }
1914
1915 dev_priv->psr.enabled = NULL;
1916 mutex_unlock(&dev_priv->psr.lock);
1875 1917
1876 /* Wait till PSR is idle */ 1918 cancel_delayed_work_sync(&dev_priv->psr.work);
1877 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
1878 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1879 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1880} 1919}
1881 1920
1882void intel_edp_psr_update(struct drm_device *dev) 1921static void intel_edp_psr_work(struct work_struct *work)
1883{ 1922{
1884 struct intel_encoder *encoder; 1923 struct drm_i915_private *dev_priv =
1885 struct intel_dp *intel_dp = NULL; 1924 container_of(work, typeof(*dev_priv), psr.work.work);
1925 struct intel_dp *intel_dp = dev_priv->psr.enabled;
1886 1926
1887 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) 1927 mutex_lock(&dev_priv->psr.lock);
1888 if (encoder->type == INTEL_OUTPUT_EDP) { 1928 intel_dp = dev_priv->psr.enabled;
1889 intel_dp = enc_to_intel_dp(&encoder->base);
1890 1929
1891 if (!is_edp_psr(dev)) 1930 if (!intel_dp)
1892 return; 1931 goto unlock;
1893 1932
1894 if (!intel_edp_psr_match_conditions(intel_dp)) 1933 /*
1895 intel_edp_psr_disable(intel_dp); 1934 * The delayed work can race with an invalidate hence we need to
1896 else 1935 * recheck. Since psr_flush first clears this and then reschedules we
1897 if (!intel_edp_is_psr_enabled(dev)) 1936 * won't ever miss a flush when bailing out here.
1898 intel_edp_psr_do_enable(intel_dp); 1937 */
1899 } 1938 if (dev_priv->psr.busy_frontbuffer_bits)
1939 goto unlock;
1940
1941 intel_edp_psr_do_enable(intel_dp);
1942unlock:
1943 mutex_unlock(&dev_priv->psr.lock);
1944}
1945
1946static void intel_edp_psr_do_exit(struct drm_device *dev)
1947{
1948 struct drm_i915_private *dev_priv = dev->dev_private;
1949
1950 if (dev_priv->psr.active) {
1951 u32 val = I915_READ(EDP_PSR_CTL(dev));
1952
1953 WARN_ON(!(val & EDP_PSR_ENABLE));
1954
1955 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
1956
1957 dev_priv->psr.active = false;
1958 }
1959
1960}
1961
1962void intel_edp_psr_invalidate(struct drm_device *dev,
1963 unsigned frontbuffer_bits)
1964{
1965 struct drm_i915_private *dev_priv = dev->dev_private;
1966 struct drm_crtc *crtc;
1967 enum pipe pipe;
1968
1969 mutex_lock(&dev_priv->psr.lock);
1970 if (!dev_priv->psr.enabled) {
1971 mutex_unlock(&dev_priv->psr.lock);
1972 return;
1973 }
1974
1975 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
1976 pipe = to_intel_crtc(crtc)->pipe;
1977
1978 intel_edp_psr_do_exit(dev);
1979
1980 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
1981
1982 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
1983 mutex_unlock(&dev_priv->psr.lock);
1984}
1985
1986void intel_edp_psr_flush(struct drm_device *dev,
1987 unsigned frontbuffer_bits)
1988{
1989 struct drm_i915_private *dev_priv = dev->dev_private;
1990 struct drm_crtc *crtc;
1991 enum pipe pipe;
1992
1993 mutex_lock(&dev_priv->psr.lock);
1994 if (!dev_priv->psr.enabled) {
1995 mutex_unlock(&dev_priv->psr.lock);
1996 return;
1997 }
1998
1999 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
2000 pipe = to_intel_crtc(crtc)->pipe;
2001 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
2002
2003 /*
2004 * On Haswell, sprite plane updates don't result in a PSR invalidating
2005 * signal in the hardware, which means we need to fake this manually in
2006 * software for all flushes, not just when we've seen a preceding
2007 * invalidation through frontbuffer rendering.
2008 */
2009 if (IS_HASWELL(dev) &&
2010 (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
2011 intel_edp_psr_do_exit(dev);
2012
2013 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
2014 schedule_delayed_work(&dev_priv->psr.work,
2015 msecs_to_jiffies(100));
2016 mutex_unlock(&dev_priv->psr.lock);
2017}
2018
2019void intel_edp_psr_init(struct drm_device *dev)
2020{
2021 struct drm_i915_private *dev_priv = dev->dev_private;
2022
2023 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
2024 mutex_init(&dev_priv->psr.lock);
1900} 2025}
1901 2026
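[ Ed. the new invalidate/flush pair is the contract with the frontbuffer tracking code: invalidate exits PSR and marks planes busy, flush clears the busy bits and re-arms PSR only once nothing is outstanding. A minimal single-threaded model of that state machine (names illustrative; the real code defers the re-enable through psr.work): ]

#include <assert.h>
#include <stdbool.h>

static unsigned busy_bits; /* frontbuffer bits with rendering pending */
static bool psr_active;

static void psr_invalidate(unsigned bits)
{
	psr_active = false;    /* something is about to touch the frontbuffer */
	busy_bits |= bits;
}

static void psr_flush(unsigned bits)
{
	busy_bits &= ~bits;
	if (!busy_bits)        /* re-arm only when all rendering is flushed */
		psr_active = true; /* stands in for the delayed re-enable */
}

int main(void)
{
	psr_active = true;
	psr_invalidate(0x3);   /* two planes dirtied */
	assert(!psr_active);
	psr_flush(0x1);        /* one flushed: still busy */
	assert(!psr_active && busy_bits == 0x2);
	psr_flush(0x2);        /* all flushed: PSR can come back */
	assert(psr_active);
	return 0;
}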
1902static void intel_disable_dp(struct intel_encoder *encoder) 2027static void intel_disable_dp(struct intel_encoder *encoder)
@@ -2152,6 +2277,70 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2152 vlv_wait_port_ready(dev_priv, dport); 2277 vlv_wait_port_ready(dev_priv, dport);
2153} 2278}
2154 2279
2280static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2281{
2282 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2283 struct drm_device *dev = encoder->base.dev;
2284 struct drm_i915_private *dev_priv = dev->dev_private;
2285 struct intel_crtc *intel_crtc =
2286 to_intel_crtc(encoder->base.crtc);
2287 enum dpio_channel ch = vlv_dport_to_channel(dport);
2288 enum pipe pipe = intel_crtc->pipe;
2289 u32 val;
2290
2291 mutex_lock(&dev_priv->dpio_lock);
2292
2293 /* program left/right clock distribution */
2294 if (pipe != PIPE_B) {
2295 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2296 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2297 if (ch == DPIO_CH0)
2298 val |= CHV_BUFLEFTENA1_FORCE;
2299 if (ch == DPIO_CH1)
2300 val |= CHV_BUFRIGHTENA1_FORCE;
2301 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2302 } else {
2303 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2304 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2305 if (ch == DPIO_CH0)
2306 val |= CHV_BUFLEFTENA2_FORCE;
2307 if (ch == DPIO_CH1)
2308 val |= CHV_BUFRIGHTENA2_FORCE;
2309 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2310 }
2311
2312 /* program clock channel usage */
2313 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2314 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2315 if (pipe != PIPE_B)
2316 val &= ~CHV_PCS_USEDCLKCHANNEL;
2317 else
2318 val |= CHV_PCS_USEDCLKCHANNEL;
2319 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2320
2321 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2322 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2323 if (pipe != PIPE_B)
2324 val &= ~CHV_PCS_USEDCLKCHANNEL;
2325 else
2326 val |= CHV_PCS_USEDCLKCHANNEL;
2327 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2328
2329 /*
2330 * This is a bit weird since generally CL
2331 * matches the pipe, but here we need to
2332 * pick the CL based on the port.
2333 */
2334 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2335 if (pipe != PIPE_B)
2336 val &= ~CHV_CMN_USEDCLKCHANNEL;
2337 else
2338 val |= CHV_CMN_USEDCLKCHANNEL;
2339 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2340
2341 mutex_unlock(&dev_priv->dpio_lock);
2342}
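[ Ed. Sketch only: the read-modify-write pattern chv_dp_pre_pll_enable() applies to each DPIO register - read, clear the field, set the wanted bits, write back. The register model and bit positions here are hypothetical. ]

#include <stdint.h>
#include <stdio.h>

#define USEDCLKCHANNEL_OVRRIDE	(1u << 1)	/* made-up bit positions */
#define USEDCLKCHANNEL		(1u << 0)

static uint32_t fake_reg;

static uint32_t reg_read(void)	  { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

static void select_clock_channel(int pipe_is_b)
{
	uint32_t val = reg_read();

	val |= USEDCLKCHANNEL_OVRRIDE;		/* take manual control */
	if (!pipe_is_b)
		val &= ~USEDCLKCHANNEL;		/* route from channel 0 */
	else
		val |= USEDCLKCHANNEL;		/* route from channel 1 */
	reg_write(val);
}

int main(void)
{
	select_clock_channel(1);
	printf("reg = 0x%x\n", fake_reg);	/* 0x3 */
	return 0;
}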
2343
2155 2344 /*
2156 2345  * Native read with retry for link status and receiver capability reads for
2157 2346  * cases where the sink may still be asleep.
@@ -2189,18 +2378,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
2189 2378 				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2190 2379 }
2191 2380
2192/*
2193 * These are source-specific values; current Intel hardware supports
2194 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
2195 */
2196
2381/* These are source-specific values. */
2197 2382 static uint8_t
2198 2383 intel_dp_voltage_max(struct intel_dp *intel_dp)
2199 2384 {
2200 2385 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2201 2386 	enum port port = dp_to_dig_port(intel_dp)->port;
2202 2387
2203 	if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
2388 	if (IS_VALLEYVIEW(dev))
2204 2389 		return DP_TRAIN_VOLTAGE_SWING_1200;
2205 2390 	else if (IS_GEN7(dev) && port == PORT_A)
2206 2391 		return DP_TRAIN_VOLTAGE_SWING_800;
@@ -2216,18 +2401,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2216 2401 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2217 2402 	enum port port = dp_to_dig_port(intel_dp)->port;
2218 2403
2219 	if (IS_BROADWELL(dev)) {
2404 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2220 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2221 case DP_TRAIN_VOLTAGE_SWING_400:
2222 case DP_TRAIN_VOLTAGE_SWING_600:
2223 return DP_TRAIN_PRE_EMPHASIS_6;
2224 case DP_TRAIN_VOLTAGE_SWING_800:
2225 return DP_TRAIN_PRE_EMPHASIS_3_5;
2226 case DP_TRAIN_VOLTAGE_SWING_1200:
2227 default:
2228 return DP_TRAIN_PRE_EMPHASIS_0;
2229 }
2230 } else if (IS_HASWELL(dev)) {
2231 2405 	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2232 2406 	case DP_TRAIN_VOLTAGE_SWING_400:
2233 2407 		return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -2699,41 +2873,6 @@ intel_hsw_signal_levels(uint8_t train_set)
2699 2873 	}
2700 2874 }
2701 2875
2702static uint32_t
2703intel_bdw_signal_levels(uint8_t train_set)
2704{
2705 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2706 DP_TRAIN_PRE_EMPHASIS_MASK);
2707 switch (signal_levels) {
2708 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2709 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2710 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2711 return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
2712 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2713 return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
2714
2715 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2716 return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
2717 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2718 return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
2719 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2720 return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
2721
2722 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2723 return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
2724 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2725 return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
2726
2727 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2728 return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
2729
2730 default:
2731 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2732 "0x%x\n", signal_levels);
2733 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2734 }
2735}
2736
2737 2876 /* Properly updates "DP" with the correct signal levels. */
2738 2877 static void
2739 2878 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2744,10 +2883,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2744 2883 	uint32_t signal_levels, mask;
2745 2884 	uint8_t train_set = intel_dp->train_set[0];
2746 2885
2747 	if (IS_BROADWELL(dev)) {
2748 		signal_levels = intel_bdw_signal_levels(train_set);
2749 		mask = DDI_BUF_EMP_MASK;
2750 	} else if (IS_HASWELL(dev)) {
2886 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2751 2887 		signal_levels = intel_hsw_signal_levels(train_set);
2752 2888 		mask = DDI_BUF_EMP_MASK;
2753 2889 	} else if (IS_CHERRYVIEW(dev)) {
@@ -3246,6 +3382,33 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
3246 3382 	edp_panel_vdd_off(intel_dp, false);
3247 3383 }
3248 3384
3385static bool
3386intel_dp_probe_mst(struct intel_dp *intel_dp)
3387{
3388 u8 buf[1];
3389
3390 if (!intel_dp->can_mst)
3391 return false;
3392
3393 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3394 return false;
3395
3396 _edp_panel_vdd_on(intel_dp);
3397 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3398 if (buf[0] & DP_MST_CAP) {
3399 DRM_DEBUG_KMS("Sink is MST capable\n");
3400 intel_dp->is_mst = true;
3401 } else {
3402 DRM_DEBUG_KMS("Sink is not MST capable\n");
3403 intel_dp->is_mst = false;
3404 }
3405 }
3406 edp_panel_vdd_off(intel_dp, false);
3407
3408 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3409 return intel_dp->is_mst;
3410}
3411
3249 3412 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3250 3413 {
3251 3414 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -3283,6 +3446,20 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3283 3446 				       sink_irq_vector, 1) == 1;
3284 3447 }
3285 3448
3449static bool
3450intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3451{
3452 int ret;
3453
3454 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3455 DP_SINK_COUNT_ESI,
3456 sink_irq_vector, 14);
3457 if (ret != 14)
3458 return false;
3459
3460 return true;
3461}
3462
3286 3463 static void
3287 3464 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3288 3465 {
@@ -3290,6 +3467,63 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
3290 3467 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
3291 3468 }
3292 3469
3470static int
3471intel_dp_check_mst_status(struct intel_dp *intel_dp)
3472{
3473 bool bret;
3474
3475 if (intel_dp->is_mst) {
3476 u8 esi[16] = { 0 };
3477 int ret = 0;
3478 int retry;
3479 bool handled;
3480 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3481go_again:
3482 if (bret == true) {
3483
3484 /* check link status - esi[10] = 0x200c */
3485 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3486 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3487 intel_dp_start_link_train(intel_dp);
3488 intel_dp_complete_link_train(intel_dp);
3489 intel_dp_stop_link_train(intel_dp);
3490 }
3491
3492 DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3493 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3494
3495 if (handled) {
3496 for (retry = 0; retry < 3; retry++) {
3497 int wret;
3498 wret = drm_dp_dpcd_write(&intel_dp->aux,
3499 DP_SINK_COUNT_ESI+1,
3500 &esi[1], 3);
3501 if (wret == 3) {
3502 break;
3503 }
3504 }
3505
3506 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3507 if (bret == true) {
3508 DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3509 goto go_again;
3510 }
3511 } else
3512 ret = 0;
3513
3514 return ret;
3515 } else {
3516 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3517 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3518 intel_dp->is_mst = false;
3519 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3520 /* send a hotplug event */
3521 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3522 }
3523 }
3524 return -EINVAL;
3525}
3526
3293 3527 /*
3294 3528  * According to DP spec
3295 3529  * 5.1.2:
@@ -3298,15 +3532,16 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
3298 3532  * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3299 3533  * 4. Check link status on receipt of hot-plug interrupt
3300 3534  */
3301
3302 3535 void
3303 3536 intel_dp_check_link_status(struct intel_dp *intel_dp)
3304 3537 {
3538 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3305 3539 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3306 3540 	u8 sink_irq_vector;
3307 3541 	u8 link_status[DP_LINK_STATUS_SIZE];
3308 3542
3309 	/* FIXME: This access isn't protected by any locks. */
3543 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3544
3310 3545 	if (!intel_encoder->connectors_active)
3311 3546 		return;
3312 3547
@@ -3518,8 +3753,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3518 3753 	enum drm_connector_status status;
3519 3754 	enum intel_display_power_domain power_domain;
3520 3755 	struct edid *edid = NULL;
3521
3756 	bool ret;
3522 	intel_runtime_pm_get(dev_priv);
3523 3757
3524 3758 	power_domain = intel_display_port_power_domain(intel_encoder);
3525 3759 	intel_display_power_get(dev_priv, power_domain);
@@ -3527,6 +3761,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3527 3761 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3528 3762 		      connector->base.id, connector->name);
3529 3763
3764 if (intel_dp->is_mst) {
3765 /* MST devices are disconnected from a monitor POV */
3766 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3767 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3768 status = connector_status_disconnected;
3769 goto out;
3770 }
3771
3530 3772 	intel_dp->has_audio = false;
3531 3773
3532 3774 	if (HAS_PCH_SPLIT(dev))
@@ -3539,6 +3781,16 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3539 3781
3540 3782 	intel_dp_probe_oui(intel_dp);
3541 3783
3784 ret = intel_dp_probe_mst(intel_dp);
3785 if (ret) {
3786 		/* if we are in MST mode then this connector
3787 		 * won't appear connected or have anything with EDID on it */
3788 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3789 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3790 status = connector_status_disconnected;
3791 goto out;
3792 }
3793
3542 3794 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
3543 3795 		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
3544 3796 	} else {
@@ -3555,9 +3807,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3555 3807
3556 3808 out:
3557 3809 	intel_display_power_put(dev_priv, power_domain);
3558
3559 	intel_runtime_pm_put(dev_priv);
3560
3561 3810 	return status;
3562 3811 }
3563 3812
@@ -3734,6 +3983,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3734 3983 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3735 3984
3736 3985 	drm_dp_aux_unregister(&intel_dp->aux);
3986 	intel_dp_mst_encoder_cleanup(intel_dig_port);
3737 3987 	drm_encoder_cleanup(encoder);
3738 3988 	if (is_edp(intel_dp)) {
3739 3989 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -3766,12 +4016,64 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
3766 4016 	.destroy = intel_dp_encoder_destroy,
3767 4017 };
3768 4018
3769static void
4019void
3770 4020 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
3771 4021 {
3772 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4022 	return;
4023}
4024
4025bool
4026intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4027{
4028 struct intel_dp *intel_dp = &intel_dig_port->dp;
4029 struct drm_device *dev = intel_dig_port->base.base.dev;
4030 struct drm_i915_private *dev_priv = dev->dev_private;
4031 int ret;
4032 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4033 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4034
4035 DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
4036 long_hpd ? "long" : "short");
4037
4038 if (long_hpd) {
4039 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4040 goto mst_fail;
3773 4041
3774 	intel_dp_check_link_status(intel_dp);
4042 		if (!intel_dp_get_dpcd(intel_dp)) {
4043 goto mst_fail;
4044 }
4045
4046 intel_dp_probe_oui(intel_dp);
4047
4048 if (!intel_dp_probe_mst(intel_dp))
4049 goto mst_fail;
4050
4051 } else {
4052 if (intel_dp->is_mst) {
4053 ret = intel_dp_check_mst_status(intel_dp);
4054 if (ret == -EINVAL)
4055 goto mst_fail;
4056 }
4057
4058 if (!intel_dp->is_mst) {
4059 /*
4060 * we'll check the link status via the normal hot plug path later -
4061 * but for short hpds we should check it now
4062 */
4063 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4064 intel_dp_check_link_status(intel_dp);
4065 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4066 }
4067 }
4068 return false;
4069mst_fail:
4070 /* if we were in MST mode, and the device is not there, get out of MST mode */
4071 if (intel_dp->is_mst) {
4072 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4073 intel_dp->is_mst = false;
4074 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4075 }
4076 return true;
3775 4077 }
3776 4078
3777 4079 /* Return which DP Port should be selected for Transcoder DP control */
@@ -3822,7 +4124,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
3822 4124 	return false;
3823 4125 }
3824 4126
3825static void
4127void
3826 4128 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
3827 4129 {
3828 4130 	struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -4035,6 +4337,11 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4035 4337 		return;
4036 4338 	}
4037 4339
4340 /*
4341 * FIXME: This needs proper synchronization with psr state. But really
4342 * hard to tell without seeing the user of this function of this code.
4343 * Check locking and ordering once that lands.
4344 */
4038 4345 	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
4039 4346 		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
4040 4347 		return;
@@ -4288,7 +4595,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4288 4595 			  edp_panel_vdd_work);
4289 4596
4290 4597 	intel_connector_attach_encoder(intel_connector, intel_encoder);
4291 	drm_sysfs_connector_add(connector);
4598 	drm_connector_register(connector);
4292 4599
4293 4600 	if (HAS_DDI(dev))
4294 4601 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -4321,7 +4628,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4321 4628
4322 4629 	intel_dp_aux_init(intel_dp, intel_connector);
4323 4630
4324 	intel_dp->psr_setup_done = false;
4631 	/* init MST on ports that can support it */
4632 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4633 if (port == PORT_B || port == PORT_C || port == PORT_D) {
4634 intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
4635 }
4636 }
4325 4637
4326 4638 	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
4327 4639 		drm_dp_aux_unregister(&intel_dp->aux);
@@ -4331,7 +4643,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4331 4643 			edp_panel_vdd_off_sync(intel_dp);
4332 4644 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4333 4645 		}
4334 		drm_sysfs_connector_remove(connector);
4646 		drm_connector_unregister(connector);
4335 4647 		drm_connector_cleanup(connector);
4336 4648 		return false;
4337 4649 	}
@@ -4353,6 +4665,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4353 4665 void
4354 4666 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4355 4667 {
4668 	struct drm_i915_private *dev_priv = dev->dev_private;
4356 4669 	struct intel_digital_port *intel_dig_port;
4357 4670 	struct intel_encoder *intel_encoder;
4358 4671 	struct drm_encoder *encoder;
@@ -4379,6 +4692,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4379 4692 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
4380 4693 	intel_encoder->get_config = intel_dp_get_config;
4381 4694 	if (IS_CHERRYVIEW(dev)) {
4695 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
4382 4696 		intel_encoder->pre_enable = chv_pre_enable_dp;
4383 4697 		intel_encoder->enable = vlv_enable_dp;
4384 4698 		intel_encoder->post_disable = chv_post_disable_dp;
@@ -4408,9 +4722,55 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4408 4722 	intel_encoder->cloneable = 0;
4409 4723 	intel_encoder->hot_plug = intel_dp_hot_plug;
4410 4724
4725 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
4726 dev_priv->hpd_irq_port[port] = intel_dig_port;
4727
4411 4728 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
4412 4729 		drm_encoder_cleanup(encoder);
4413 4730 		kfree(intel_dig_port);
4414 4731 		kfree(intel_connector);
4415 4732 	}
4416 4733 }
4734
4735void intel_dp_mst_suspend(struct drm_device *dev)
4736{
4737 struct drm_i915_private *dev_priv = dev->dev_private;
4738 int i;
4739
4740 /* disable MST */
4741 for (i = 0; i < I915_MAX_PORTS; i++) {
4742 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
4743 if (!intel_dig_port)
4744 continue;
4745
4746 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
4747 if (!intel_dig_port->dp.can_mst)
4748 continue;
4749 if (intel_dig_port->dp.is_mst)
4750 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
4751 }
4752 }
4753}
4754
4755void intel_dp_mst_resume(struct drm_device *dev)
4756{
4757 struct drm_i915_private *dev_priv = dev->dev_private;
4758 int i;
4759
4760 for (i = 0; i < I915_MAX_PORTS; i++) {
4761 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
4762 if (!intel_dig_port)
4763 continue;
4764 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
4765 int ret;
4766
4767 if (!intel_dig_port->dp.can_mst)
4768 continue;
4769
4770 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
4771 if (ret != 0) {
4772 intel_dp_check_mst_status(&intel_dig_port->dp);
4773 }
4774 }
4775 }
4776}
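[ Ed. Sketch of the port walk in intel_dp_mst_suspend()/intel_dp_mst_resume(): skip empty slots and ports without MST support, and only touch the topology manager where MST is actually active. Types are simplified stand-ins. ]

#include <stdbool.h>
#include <stdio.h>

#define MAX_PORTS 5

struct port { bool present, can_mst, is_mst; };

static void mst_suspend(const struct port *ports)
{
	for (int i = 0; i < MAX_PORTS; i++) {
		if (!ports[i].present || !ports[i].can_mst)
			continue;
		if (ports[i].is_mst)
			printf("port %d: topology mgr suspended\n", i);
	}
}

int main(void)
{
	struct port ports[MAX_PORTS] = {
		[1] = { .present = true, .can_mst = true, .is_mst = true },
	};
	mst_suspend(ports);
	return 0;
}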
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
new file mode 100644
index 000000000000..d9a7a7865f66
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -0,0 +1,548 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 * 2014 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 */
25
26#include <drm/drmP.h>
27#include "i915_drv.h"
28#include "intel_drv.h"
29#include <drm/drm_crtc_helper.h>
30#include <drm/drm_edid.h>
31
32static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
33 struct intel_crtc_config *pipe_config)
34{
35 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
36 struct intel_digital_port *intel_dig_port = intel_mst->primary;
37 struct intel_dp *intel_dp = &intel_dig_port->dp;
38 struct drm_device *dev = encoder->base.dev;
39 int bpp;
40 int lane_count, slots;
41 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
42 struct intel_connector *found = NULL, *intel_connector;
43 int mst_pbn;
44
45 pipe_config->dp_encoder_is_mst = true;
46 pipe_config->has_pch_encoder = false;
47 pipe_config->has_dp_encoder = true;
48 bpp = 24;
49 /*
50 * for MST we always configure max link bw - the spec doesn't
51 * seem to suggest we should do otherwise.
52 */
53 lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
54 intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
55 intel_dp->lane_count = lane_count;
56
57 pipe_config->pipe_bpp = 24;
58 pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
59
60 list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
61 if (intel_connector->new_encoder == encoder) {
62 found = intel_connector;
63 break;
64 }
65 }
66
67 if (!found) {
68 DRM_ERROR("can't find connector\n");
69 return false;
70 }
71
72 mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
73
74 pipe_config->pbn = mst_pbn;
75 slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
76
77 intel_link_compute_m_n(bpp, lane_count,
78 adjusted_mode->crtc_clock,
79 pipe_config->port_clock,
80 &pipe_config->dp_m_n);
81
82 pipe_config->dp_m_n.tu = slots;
83 return true;
84
85}
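[ Ed. Rough, self-contained version of the PBN number used above: one PBN unit corresponds to 54/64 MBytes/sec of payload bandwidth, and the DP spec adds a 0.6% margin. This mirrors the helper's fixed-point math only approximately. ]

#include <stdio.h>

static long long calc_pbn(long long clock_khz, long long bpp)
{
	long long num = clock_khz * bpp * 64 * 1006;
	long long den = 54LL * 8 * 1000 * 1000;

	return (num + den - 1) / den;	/* round up */
}

int main(void)
{
	/* 1080p60 at 24bpp: 148.5 MHz pixel clock -> 532 PBN */
	printf("%lld\n", calc_pbn(148500, 24));
	return 0;
}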
86
87static void intel_mst_disable_dp(struct intel_encoder *encoder)
88{
89 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
90 struct intel_digital_port *intel_dig_port = intel_mst->primary;
91 struct intel_dp *intel_dp = &intel_dig_port->dp;
92 int ret;
93
94 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
95
96 drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
97
98 ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
99 if (ret) {
100 DRM_ERROR("failed to update payload %d\n", ret);
101 }
102}
103
104static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
105{
106 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
107 struct intel_digital_port *intel_dig_port = intel_mst->primary;
108 struct intel_dp *intel_dp = &intel_dig_port->dp;
109
110 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
111
112 /* this can fail */
113 drm_dp_check_act_status(&intel_dp->mst_mgr);
114 /* and this can also fail */
115 drm_dp_update_payload_part2(&intel_dp->mst_mgr);
116
117 drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
118
119 intel_dp->active_mst_links--;
120 intel_mst->port = NULL;
121 if (intel_dp->active_mst_links == 0) {
122 intel_dig_port->base.post_disable(&intel_dig_port->base);
123 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
124 }
125}
126
127static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
128{
129 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
130 struct intel_digital_port *intel_dig_port = intel_mst->primary;
131 struct intel_dp *intel_dp = &intel_dig_port->dp;
132 struct drm_device *dev = encoder->base.dev;
133 struct drm_i915_private *dev_priv = dev->dev_private;
134 enum port port = intel_dig_port->port;
135 int ret;
136 uint32_t temp;
137 struct intel_connector *found = NULL, *intel_connector;
138 int slots;
139 struct drm_crtc *crtc = encoder->base.crtc;
140 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
141
142 list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
143 if (intel_connector->new_encoder == encoder) {
144 found = intel_connector;
145 break;
146 }
147 }
148
149 if (!found) {
150 DRM_ERROR("can't find connector\n");
151 return;
152 }
153
154 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
155 intel_mst->port = found->port;
156
157 if (intel_dp->active_mst_links == 0) {
158 enum port port = intel_ddi_get_encoder_port(encoder);
159
160 I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
161
162 intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
163
164 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
165
166
167 intel_dp_start_link_train(intel_dp);
168 intel_dp_complete_link_train(intel_dp);
169 intel_dp_stop_link_train(intel_dp);
170 }
171
172 ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
173 intel_mst->port, intel_crtc->config.pbn, &slots);
174 if (ret == false) {
175 DRM_ERROR("failed to allocate vcpi\n");
176 return;
177 }
178
179
180 intel_dp->active_mst_links++;
181 temp = I915_READ(DP_TP_STATUS(port));
182 I915_WRITE(DP_TP_STATUS(port), temp);
183
184 ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
185}
186
187static void intel_mst_enable_dp(struct intel_encoder *encoder)
188{
189 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
190 struct intel_digital_port *intel_dig_port = intel_mst->primary;
191 struct intel_dp *intel_dp = &intel_dig_port->dp;
192 struct drm_device *dev = intel_dig_port->base.base.dev;
193 struct drm_i915_private *dev_priv = dev->dev_private;
194 enum port port = intel_dig_port->port;
195 int ret;
196
197 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
198
199 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
200 1))
201 DRM_ERROR("Timed out waiting for ACT sent\n");
202
203 ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
204
205 ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
206}
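[ Ed. Sketch of the bounded poll behind the ACT-sent wait above: spin on a status read until the done bit shows up or the budget runs out. The bit position and read function are made up. ]

#include <stdbool.h>
#include <stdio.h>

#define ACT_SENT	(1u << 0)	/* hypothetical status bit */

static unsigned int poll_count;

static unsigned int read_status(void)
{
	/* pretend the bit turns up on the third poll */
	return ++poll_count >= 3 ? ACT_SENT : 0;
}

static bool wait_for_act_sent(int tries)
{
	while (tries--) {
		if (read_status() & ACT_SENT)
			return true;
	}
	return false;
}

int main(void)
{
	if (!wait_for_act_sent(10))
		printf("Timed out waiting for ACT sent\n");
	return 0;
}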
207
208static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
209 enum pipe *pipe)
210{
211 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
212 *pipe = intel_mst->pipe;
213 if (intel_mst->port)
214 return true;
215 return false;
216}
217
218static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
219 struct intel_crtc_config *pipe_config)
220{
221 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
222 struct intel_digital_port *intel_dig_port = intel_mst->primary;
223 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
224 struct drm_device *dev = encoder->base.dev;
225 struct drm_i915_private *dev_priv = dev->dev_private;
226 enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
227 u32 temp, flags = 0;
228
229 pipe_config->has_dp_encoder = true;
230
231 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
232 if (temp & TRANS_DDI_PHSYNC)
233 flags |= DRM_MODE_FLAG_PHSYNC;
234 else
235 flags |= DRM_MODE_FLAG_NHSYNC;
236 if (temp & TRANS_DDI_PVSYNC)
237 flags |= DRM_MODE_FLAG_PVSYNC;
238 else
239 flags |= DRM_MODE_FLAG_NVSYNC;
240
241 switch (temp & TRANS_DDI_BPC_MASK) {
242 case TRANS_DDI_BPC_6:
243 pipe_config->pipe_bpp = 18;
244 break;
245 case TRANS_DDI_BPC_8:
246 pipe_config->pipe_bpp = 24;
247 break;
248 case TRANS_DDI_BPC_10:
249 pipe_config->pipe_bpp = 30;
250 break;
251 case TRANS_DDI_BPC_12:
252 pipe_config->pipe_bpp = 36;
253 break;
254 default:
255 break;
256 }
257 pipe_config->adjusted_mode.flags |= flags;
258 intel_dp_get_m_n(crtc, pipe_config);
259
260 intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
261}
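[ Ed. Sketch of the BPC decode in intel_dp_mst_enc_get_config() above: the transcoder reports bits per component, and pipe bpp is simply three components' worth of that. The encoding of 'field' below is made up. ]

#include <stdio.h>

static int bpc_field_to_pipe_bpp(int field)
{
	switch (field) {
	case 0: return 6 * 3;	/* 6 bpc -> 18 bpp */
	case 1: return 8 * 3;	/* 8 bpc -> 24 bpp */
	case 2: return 10 * 3;	/* 10 bpc -> 30 bpp */
	case 3: return 12 * 3;	/* 12 bpc -> 36 bpp */
	default: return 0;
	}
}

int main(void)
{
	printf("%d\n", bpc_field_to_pipe_bpp(1));	/* 24 */
	return 0;
}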
262
263static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
264{
265 struct intel_connector *intel_connector = to_intel_connector(connector);
266 struct intel_dp *intel_dp = intel_connector->mst_port;
267 struct edid *edid;
268 int ret;
269
270 edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
271 if (!edid)
272 return 0;
273
274 ret = intel_connector_update_modes(connector, edid);
275 kfree(edid);
276
277 return ret;
278}
279
280static enum drm_connector_status
281intel_mst_port_dp_detect(struct drm_connector *connector)
282{
283 struct intel_connector *intel_connector = to_intel_connector(connector);
284 struct intel_dp *intel_dp = intel_connector->mst_port;
285
286 return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
287}
288
289static enum drm_connector_status
290intel_dp_mst_detect(struct drm_connector *connector, bool force)
291{
292 enum drm_connector_status status;
293 status = intel_mst_port_dp_detect(connector);
294 return status;
295}
296
297static int
298intel_dp_mst_set_property(struct drm_connector *connector,
299 struct drm_property *property,
300 uint64_t val)
301{
302 return 0;
303}
304
305static void
306intel_dp_mst_connector_destroy(struct drm_connector *connector)
307{
308 struct intel_connector *intel_connector = to_intel_connector(connector);
309
310 if (!IS_ERR_OR_NULL(intel_connector->edid))
311 kfree(intel_connector->edid);
312
313 drm_connector_cleanup(connector);
314 kfree(connector);
315}
316
317static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
318 .dpms = intel_connector_dpms,
319 .detect = intel_dp_mst_detect,
320 .fill_modes = drm_helper_probe_single_connector_modes,
321 .set_property = intel_dp_mst_set_property,
322 .destroy = intel_dp_mst_connector_destroy,
323};
324
325static int intel_dp_mst_get_modes(struct drm_connector *connector)
326{
327 return intel_dp_mst_get_ddc_modes(connector);
328}
329
330static enum drm_mode_status
331intel_dp_mst_mode_valid(struct drm_connector *connector,
332 struct drm_display_mode *mode)
333{
334 /* TODO - validate mode against available PBN for link */
335 if (mode->clock < 10000)
336 return MODE_CLOCK_LOW;
337
338 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
339 return MODE_H_ILLEGAL;
340
341 return MODE_OK;
342}
343
344static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
345{
346 struct intel_connector *intel_connector = to_intel_connector(connector);
347 struct intel_dp *intel_dp = intel_connector->mst_port;
348 return &intel_dp->mst_encoders[0]->base.base;
349}
350
351static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
352 .get_modes = intel_dp_mst_get_modes,
353 .mode_valid = intel_dp_mst_mode_valid,
354 .best_encoder = intel_mst_best_encoder,
355};
356
357static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
358{
359 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
360
361 drm_encoder_cleanup(encoder);
362 kfree(intel_mst);
363}
364
365static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
366 .destroy = intel_dp_mst_encoder_destroy,
367};
368
369static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
370{
371 if (connector->encoder) {
372 enum pipe pipe;
373 if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
374 return false;
375 return true;
376 }
377 return false;
378}
379
380static void intel_connector_add_to_fbdev(struct intel_connector *connector)
381{
382#ifdef CONFIG_DRM_I915_FBDEV
383 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
384 drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
385#endif
386}
387
388static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
389{
390#ifdef CONFIG_DRM_I915_FBDEV
391 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
392 drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
393#endif
394}
395
396static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
397{
398 struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
399 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
400 struct drm_device *dev = intel_dig_port->base.base.dev;
401 struct intel_connector *intel_connector;
402 struct drm_connector *connector;
403 int i;
404
405 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
406 if (!intel_connector)
407 return NULL;
408
409 connector = &intel_connector->base;
410 drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
411 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
412
413 intel_connector->unregister = intel_connector_unregister;
414 intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
415 intel_connector->mst_port = intel_dp;
416 intel_connector->port = port;
417
418 for (i = PIPE_A; i <= PIPE_C; i++) {
419 drm_mode_connector_attach_encoder(&intel_connector->base,
420 &intel_dp->mst_encoders[i]->base.base);
421 }
422 intel_dp_add_properties(intel_dp, connector);
423
424 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
425 drm_mode_connector_set_path_property(connector, pathprop);
426 drm_reinit_primary_mode_group(dev);
427 mutex_lock(&dev->mode_config.mutex);
428 intel_connector_add_to_fbdev(intel_connector);
429 mutex_unlock(&dev->mode_config.mutex);
430 drm_connector_register(&intel_connector->base);
431 return connector;
432}
433
434static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
435 struct drm_connector *connector)
436{
437 struct intel_connector *intel_connector = to_intel_connector(connector);
438 struct drm_device *dev = connector->dev;
439 /* need to nuke the connector */
440 mutex_lock(&dev->mode_config.mutex);
441 intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
442 mutex_unlock(&dev->mode_config.mutex);
443
444 intel_connector->unregister(intel_connector);
445
446 mutex_lock(&dev->mode_config.mutex);
447 intel_connector_remove_from_fbdev(intel_connector);
448 drm_connector_cleanup(connector);
449 mutex_unlock(&dev->mode_config.mutex);
450
451 drm_reinit_primary_mode_group(dev);
452
453 kfree(intel_connector);
454 DRM_DEBUG_KMS("\n");
455}
456
457static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
458{
459 struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
460 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
461 struct drm_device *dev = intel_dig_port->base.base.dev;
462
463 drm_kms_helper_hotplug_event(dev);
464}
465
466static struct drm_dp_mst_topology_cbs mst_cbs = {
467 .add_connector = intel_dp_add_mst_connector,
468 .destroy_connector = intel_dp_destroy_mst_connector,
469 .hotplug = intel_dp_mst_hotplug,
470};
471
472static struct intel_dp_mst_encoder *
473intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
474{
475 struct intel_dp_mst_encoder *intel_mst;
476 struct intel_encoder *intel_encoder;
477 struct drm_device *dev = intel_dig_port->base.base.dev;
478
479 intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
480
481 if (!intel_mst)
482 return NULL;
483
484 intel_mst->pipe = pipe;
485 intel_encoder = &intel_mst->base;
486 intel_mst->primary = intel_dig_port;
487
488 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
489 DRM_MODE_ENCODER_DPMST);
490
491 intel_encoder->type = INTEL_OUTPUT_DP_MST;
492 intel_encoder->crtc_mask = 0x7;
493 intel_encoder->cloneable = 0;
494
495 intel_encoder->compute_config = intel_dp_mst_compute_config;
496 intel_encoder->disable = intel_mst_disable_dp;
497 intel_encoder->post_disable = intel_mst_post_disable_dp;
498 intel_encoder->pre_enable = intel_mst_pre_enable_dp;
499 intel_encoder->enable = intel_mst_enable_dp;
500 intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
501 intel_encoder->get_config = intel_dp_mst_enc_get_config;
502
503 return intel_mst;
504
505}
506
507static bool
508intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
509{
510 int i;
511 struct intel_dp *intel_dp = &intel_dig_port->dp;
512
513 for (i = PIPE_A; i <= PIPE_C; i++)
514 intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
515 return true;
516}
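[ Ed. Sketch of the one-fake-encoder-per-pipe idea used above: any pipe may end up driving an MST stream, so a stub encoder is created for each pipe up front and every MST connector later attaches to all of them. Types are stand-ins. ]

#include <stdio.h>

#define NUM_PIPES 3

struct mst_encoder { int pipe; };

static struct mst_encoder encoders[NUM_PIPES];

static void create_fake_encoders(void)
{
	for (int pipe = 0; pipe < NUM_PIPES; pipe++)
		encoders[pipe].pipe = pipe;	/* one stub encoder per pipe */
}

int main(void)
{
	create_fake_encoders();
	printf("pipe C stream uses encoder %d\n", encoders[2].pipe);
	return 0;
}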
517
518int
519intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
520{
521 struct intel_dp *intel_dp = &intel_dig_port->dp;
522 struct drm_device *dev = intel_dig_port->base.base.dev;
523 int ret;
524
525 intel_dp->can_mst = true;
526 intel_dp->mst_mgr.cbs = &mst_cbs;
527
528 /* create encoders */
529 intel_dp_create_fake_mst_encoders(intel_dig_port);
530 ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id);
531 if (ret) {
532 intel_dp->can_mst = false;
533 return ret;
534 }
535 return 0;
536}
537
538void
539intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
540{
541 struct intel_dp *intel_dp = &intel_dig_port->dp;
542
543 if (!intel_dp->can_mst)
544 return;
545
546 drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
547 /* encoders will get killed by normal cleanup */
548}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f67340ed2c12..8a475a6909c3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,7 +32,7 @@
32 32 #include <drm/drm_crtc.h>
33 33 #include <drm/drm_crtc_helper.h>
34 34 #include <drm/drm_fb_helper.h>
35#include <drm/drm_dp_helper.h>
35#include <drm/drm_dp_mst_helper.h>
36 36
37 37 /**
38 38  * _wait_for - magic (register) wait macro
@@ -100,6 +100,7 @@
100 100 #define INTEL_OUTPUT_EDP 8
101 101 #define INTEL_OUTPUT_DSI 9
102 102 #define INTEL_OUTPUT_UNKNOWN 10
103#define INTEL_OUTPUT_DP_MST 11
103 104
104 105 #define INTEL_DVO_CHIP_NONE 0
105 106 #define INTEL_DVO_CHIP_LVDS 1
@@ -165,6 +166,7 @@ struct intel_panel {
165 166 	struct {
166 167 		bool present;
167 168 		u32 level;
169 		u32 min;
168 170 		u32 max;
169 171 		bool enabled;
170 172 		bool combination_mode; /* gen 2/4 only */
@@ -207,6 +209,10 @@ struct intel_connector {
207 209 	/* since POLL and HPD connectors may use the same HPD line keep the native
208 210 	   state of connector->polled in case hotplug storm detection changes it */
209 211 	u8 polled;
212
213 	void *port; /* store this opaque as it's illegal to dereference it */
214
215 	struct intel_dp *mst_port;
210 216 };
211 217
212 218 typedef struct dpll {
@@ -307,6 +313,9 @@ struct intel_crtc_config {
307 313 	/* Selected dpll when shared or DPLL_ID_PRIVATE. */
308 314 	enum intel_dpll_id shared_dpll;
309 315
316 	/* PORT_CLK_SEL for DDI ports. */
317 	uint32_t ddi_pll_sel;
318
310 319 	/* Actual register state of the dpll, for shared dpll cross-checking. */
311 320 	struct intel_dpll_hw_state dpll_hw_state;
312 321
@@ -338,6 +347,7 @@ struct intel_crtc_config {
338 347 		u32 pos;
339 348 		u32 size;
340 349 		bool enabled;
350 		bool force_thru;
341 351 	} pch_pfit;
342 352
343 353 	/* FDI configuration, only valid if has_pch_encoder is set. */
@@ -347,6 +357,9 @@ struct intel_crtc_config {
347 357 	bool ips_enabled;
348 358
349 359 	bool double_wide;
360
361 	bool dp_encoder_is_mst;
362 	int pbn;
350 363 };
351 364
352 365 struct intel_pipe_wm {
@@ -358,6 +371,11 @@ struct intel_pipe_wm {
358 371 	bool sprites_scaled;
359 372 };
360 373
374struct intel_mmio_flip {
375 u32 seqno;
376 u32 ring_id;
377};
378
361 379 struct intel_crtc {
362 380 	struct drm_crtc base;
363 381 	enum pipe pipe;
@@ -384,7 +402,6 @@ struct intel_crtc {
384 402
385 403 	struct drm_i915_gem_object *cursor_bo;
386 404 	uint32_t cursor_addr;
387 	int16_t cursor_x, cursor_y;
388 405 	int16_t cursor_width, cursor_height;
389 406 	uint32_t cursor_cntl;
390 407 	uint32_t cursor_base;
@@ -394,8 +411,6 @@ struct intel_crtc {
394 411 	struct intel_crtc_config *new_config;
395 412 	bool new_enabled;
396 413
397 	uint32_t ddi_pll_sel;
398
399 414 	/* reset counter value when the last flip was submitted */
400 415 	unsigned int reset_counter;
401 416
@@ -412,10 +427,12 @@ struct intel_crtc {
412 427 	wait_queue_head_t vbl_wait;
413 428
414 429 	int scanline_offset;
430 	struct intel_mmio_flip mmio_flip;
415 431 };
416 432
417 433 struct intel_plane_wm_parameters {
418 434 	uint32_t horiz_pixels;
435 	uint32_t vert_pixels;
419 436 	uint8_t bytes_per_pixel;
420 437 	bool enabled;
421 438 	bool scaled;
@@ -428,7 +445,6 @@ struct intel_plane {
428 445 	struct drm_i915_gem_object *obj;
429 446 	bool can_scale;
430 447 	int max_downscale;
431 	u32 lut_r[1024], lut_g[1024], lut_b[1024];
432 448 	int crtc_x, crtc_y;
433 449 	unsigned int crtc_w, crtc_h;
434 450 	uint32_t src_x, src_y;
@@ -481,6 +497,7 @@ struct cxsr_latency {
481 497 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
482 498 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
483 499 #define to_intel_plane(x) container_of(x, struct intel_plane, base)
500 #define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
484 501
485 502 struct intel_hdmi {
486 503 	u32 hdmi_reg;
@@ -491,6 +508,7 @@ struct intel_hdmi {
491 508 	bool has_audio;
492 509 	enum hdmi_force_audio force_audio;
493 510 	bool rgb_quant_range_selectable;
511 	enum hdmi_picture_aspect aspect_ratio;
494 512 	void (*write_infoframe)(struct drm_encoder *encoder,
495 513 				enum hdmi_infoframe_type type,
496 514 				const void *frame, ssize_t len);
@@ -499,6 +517,7 @@ struct intel_hdmi {
499 517 				 struct drm_display_mode *adjusted_mode);
500 518 };
501 519
520 struct intel_dp_mst_encoder;
502 521 #define DP_MAX_DOWNSTREAM_PORTS 0x10
503 522
504 523 /**
@@ -537,12 +556,20 @@ struct intel_dp {
537 556 	unsigned long last_power_cycle;
538 557 	unsigned long last_power_on;
539 558 	unsigned long last_backlight_off;
540 	bool psr_setup_done;
559
541 560 	struct notifier_block edp_notifier;
542 561
543 562 	bool use_tps3;
563 	bool can_mst; /* this port supports mst */
564 	bool is_mst;
565 	int active_mst_links;
566 	/* connector directly attached - won't be used for modeset in mst world */
544 567 	struct intel_connector *attached_connector;
545 568
569 	/* mst connector list */
570 	struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
571 	struct drm_dp_mst_topology_mgr mst_mgr;
572
546 573 	uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
547 574 	/*
548 575 	 * This function returns the value we have to program the AUX_CTL
@@ -566,6 +593,14 @@ struct intel_digital_port {
566 593 	u32 saved_port_bits;
567 594 	struct intel_dp dp;
568 595 	struct intel_hdmi hdmi;
596 	bool (*hpd_pulse)(struct intel_digital_port *, bool);
597 };
598
599 struct intel_dp_mst_encoder {
600 	struct intel_encoder base;
601 	enum pipe pipe;
602 	struct intel_digital_port *primary;
603 	void *port; /* store this opaque as it's illegal to dereference it */
569 604 };
570 605
571 606 static inline int
@@ -652,6 +687,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
652 687 	return container_of(encoder, struct intel_digital_port, base.base);
653 688 }
654 689
690static inline struct intel_dp_mst_encoder *
691enc_to_mst(struct drm_encoder *encoder)
692{
693 return container_of(encoder, struct intel_dp_mst_encoder, base.base);
694}
695
655 696 static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
656 697 {
657 698 	return &enc_to_dig_port(encoder)->dp;
@@ -676,17 +717,26 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
676 717 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
677 718 					   enum transcoder pch_transcoder,
678 719 					   bool enable);
679void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
720void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
680void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
721void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
681void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
722void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
682void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
723void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
683void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
724void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
684void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
725void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
685 726 void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
686 727 void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
728static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
729{
730 /*
731 * We only use drm_irq_uninstall() at unload and VT switch, so
732 * this is the only thing we need to check.
733 */
734 return !dev_priv->pm._irqs_disabled;
735}
736
687 737 int intel_get_crtc_scanline(struct intel_crtc *crtc);
688 738 void i9xx_check_fifo_underruns(struct drm_device *dev);
689
739void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
690 740
691 741 /* intel_crt.c */
692 742 void intel_crt_init(struct drm_device *dev);
@@ -705,10 +755,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
705 755 				       enum transcoder cpu_transcoder);
706 756 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
707 757 void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
708void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
709 758 bool intel_ddi_pll_select(struct intel_crtc *crtc);
710void intel_ddi_pll_enable(struct intel_crtc *crtc);
711void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
712 759 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
713 760 void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
714 761 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -716,17 +763,46 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc);
716 763 void intel_ddi_get_config(struct intel_encoder *encoder,
717 764 			  struct intel_crtc_config *pipe_config);
718 765
766void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
767void intel_ddi_clock_get(struct intel_encoder *encoder,
768 struct intel_crtc_config *pipe_config);
769void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
719 770
720 771 /* intel_display.c */
721 772 const char *intel_output_name(int output);
722 773 bool intel_has_pending_fb_unpin(struct drm_device *dev);
723 774 int intel_pch_rawclk(struct drm_device *dev);
724int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
725 775 void intel_mark_busy(struct drm_device *dev);
726void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
776void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
727 777 			struct intel_engine_cs *ring);
778void intel_frontbuffer_flip_prepare(struct drm_device *dev,
779 unsigned frontbuffer_bits);
780void intel_frontbuffer_flip_complete(struct drm_device *dev,
781 unsigned frontbuffer_bits);
782void intel_frontbuffer_flush(struct drm_device *dev,
783 unsigned frontbuffer_bits);
784/**
785 * intel_frontbuffer_flip - prepare frontbuffer flip
786 * @dev: DRM device
787 * @frontbuffer_bits: frontbuffer plane tracking bits
788 *
789 * This function gets called after scheduling a flip on the given planes. This is for
790 * synchronous plane updates which will happen on the next vblank and which will
791 * not get delayed by pending gpu rendering.
792 *
793 * Can be called without any locks held.
794 */
795static inline
796void intel_frontbuffer_flip(struct drm_device *dev,
797 unsigned frontbuffer_bits)
798{
799 intel_frontbuffer_flush(dev, frontbuffer_bits);
800}
801
802void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
728 803 void intel_mark_idle(struct drm_device *dev);
729 804 void intel_crtc_restore_mode(struct drm_crtc *crtc);
805void intel_crtc_control(struct drm_crtc *crtc, bool enable);
730 806 void intel_crtc_update_dpms(struct drm_crtc *crtc);
731 807 void intel_encoder_destroy(struct drm_encoder *encoder);
732 808 void intel_connector_dpms(struct drm_connector *, int mode);
@@ -767,12 +843,18 @@ __intel_framebuffer_create(struct drm_device *dev,
767 843 void intel_prepare_page_flip(struct drm_device *dev, int plane);
768 844 void intel_finish_page_flip(struct drm_device *dev, int pipe);
769 845 void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
846
847/* shared dpll functions */
770 848 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
771 849 void assert_shared_dpll(struct drm_i915_private *dev_priv,
772 850 			struct intel_shared_dpll *pll,
773 851 			bool state);
774 852 #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
775 853 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
854struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
855void intel_put_shared_dpll(struct intel_crtc *crtc);
856
857/* modesetting asserts */
776 858 void assert_pll(struct drm_i915_private *dev_priv,
777 859 		enum pipe pipe, bool state);
778 860 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -805,7 +887,6 @@ void hsw_disable_ips(struct intel_crtc *crtc);
805void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); 887void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
806enum intel_display_power_domain 888enum intel_display_power_domain
807intel_display_port_power_domain(struct intel_encoder *intel_encoder); 889intel_display_port_power_domain(struct intel_encoder *intel_encoder);
808int valleyview_get_vco(struct drm_i915_private *dev_priv);
809void intel_mode_from_pipe_config(struct drm_display_mode *mode, 890void intel_mode_from_pipe_config(struct drm_display_mode *mode,
810 struct intel_crtc_config *pipe_config); 891 struct intel_crtc_config *pipe_config);
811int intel_format_to_fourcc(int format); 892int intel_format_to_fourcc(int format);
@@ -826,6 +907,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
826bool intel_dp_compute_config(struct intel_encoder *encoder, 907bool intel_dp_compute_config(struct intel_encoder *encoder,
827 struct intel_crtc_config *pipe_config); 908 struct intel_crtc_config *pipe_config);
828bool intel_dp_is_edp(struct drm_device *dev, enum port port); 909bool intel_dp_is_edp(struct drm_device *dev, enum port port);
910bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
911 bool long_hpd);
829void intel_edp_backlight_on(struct intel_dp *intel_dp); 912void intel_edp_backlight_on(struct intel_dp *intel_dp);
830void intel_edp_backlight_off(struct intel_dp *intel_dp); 913void intel_edp_backlight_off(struct intel_dp *intel_dp);
831void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); 914void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -833,11 +916,24 @@ void intel_edp_panel_on(struct intel_dp *intel_dp);
833void intel_edp_panel_off(struct intel_dp *intel_dp); 916void intel_edp_panel_off(struct intel_dp *intel_dp);
834void intel_edp_psr_enable(struct intel_dp *intel_dp); 917void intel_edp_psr_enable(struct intel_dp *intel_dp);
835void intel_edp_psr_disable(struct intel_dp *intel_dp); 918void intel_edp_psr_disable(struct intel_dp *intel_dp);
836void intel_edp_psr_update(struct drm_device *dev);
837void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); 919void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
838 920void intel_edp_psr_invalidate(struct drm_device *dev,
921 unsigned frontbuffer_bits);
922void intel_edp_psr_flush(struct drm_device *dev,
923 unsigned frontbuffer_bits);
924void intel_edp_psr_init(struct drm_device *dev);
925
926int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
927void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
928void intel_dp_mst_suspend(struct drm_device *dev);
929void intel_dp_mst_resume(struct drm_device *dev);
930int intel_dp_max_link_bw(struct intel_dp *intel_dp);
931void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
932/* intel_dp_mst.c */
933int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
934void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
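[ Ed. The new hpd-pulse entry point splits hotplug handling by pulse length:
long pulses signal connect/disconnect, short pulses carry MST sideband and
link-status traffic, and intel_dp_mst_suspend()/resume() bracket system
sleep. A hedged sketch, assuming the bool return asks the caller to fall
back to full connector reprobing: ]

    static void example_dig_port_hpd(struct intel_digital_port *dig_port,
                                     bool long_hpd)
    {
            if (intel_dp_hpd_pulse(dig_port, long_hpd)) {
                    /* the fast path could not handle it: schedule the
                     * old-school full hotplug/detect work here */
            }
    }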
839/* intel_dsi.c */ 935/* intel_dsi.c */
840bool intel_dsi_init(struct drm_device *dev); 936void intel_dsi_init(struct drm_device *dev);
841 937
842 938
843/* intel_dvo.c */ 939/* intel_dvo.c */
@@ -920,8 +1016,8 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
920void intel_gmch_panel_fitting(struct intel_crtc *crtc, 1016void intel_gmch_panel_fitting(struct intel_crtc *crtc,
921 struct intel_crtc_config *pipe_config, 1017 struct intel_crtc_config *pipe_config,
922 int fitting_mode); 1018 int fitting_mode);
923void intel_panel_set_backlight(struct intel_connector *connector, u32 level, 1019void intel_panel_set_backlight_acpi(struct intel_connector *connector,
924 u32 max); 1020 u32 level, u32 max);
925int intel_panel_setup_backlight(struct drm_connector *connector); 1021int intel_panel_setup_backlight(struct drm_connector *connector);
926void intel_panel_enable_backlight(struct intel_connector *connector); 1022void intel_panel_enable_backlight(struct intel_connector *connector);
927void intel_panel_disable_backlight(struct intel_connector *connector); 1023void intel_panel_disable_backlight(struct intel_connector *connector);
@@ -940,7 +1036,9 @@ int ilk_wm_max_level(const struct drm_device *dev);
940void intel_update_watermarks(struct drm_crtc *crtc); 1036void intel_update_watermarks(struct drm_crtc *crtc);
941void intel_update_sprite_watermarks(struct drm_plane *plane, 1037void intel_update_sprite_watermarks(struct drm_plane *plane,
942 struct drm_crtc *crtc, 1038 struct drm_crtc *crtc,
943 uint32_t sprite_width, int pixel_size, 1039 uint32_t sprite_width,
1040 uint32_t sprite_height,
1041 int pixel_size,
944 bool enabled, bool scaled); 1042 bool enabled, bool scaled);
945void intel_init_pm(struct drm_device *dev); 1043void intel_init_pm(struct drm_device *dev);
946void intel_pm_setup(struct drm_device *dev); 1044void intel_pm_setup(struct drm_device *dev);
@@ -963,6 +1061,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
963void intel_cleanup_gt_powersave(struct drm_device *dev); 1061void intel_cleanup_gt_powersave(struct drm_device *dev);
964void intel_enable_gt_powersave(struct drm_device *dev); 1062void intel_enable_gt_powersave(struct drm_device *dev);
965void intel_disable_gt_powersave(struct drm_device *dev); 1063void intel_disable_gt_powersave(struct drm_device *dev);
1064void intel_suspend_gt_powersave(struct drm_device *dev);
966void intel_reset_gt_powersave(struct drm_device *dev); 1065void intel_reset_gt_powersave(struct drm_device *dev);
967void ironlake_teardown_rc6(struct drm_device *dev); 1066void ironlake_teardown_rc6(struct drm_device *dev);
968void gen6_update_ring_freq(struct drm_device *dev); 1067void gen6_update_ring_freq(struct drm_device *dev);
@@ -976,8 +1075,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
976void intel_init_runtime_pm(struct drm_i915_private *dev_priv); 1075void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
977void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); 1076void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
978void ilk_wm_get_hw_state(struct drm_device *dev); 1077void ilk_wm_get_hw_state(struct drm_device *dev);
979void __vlv_set_power_well(struct drm_i915_private *dev_priv, 1078
980 enum punit_power_well power_well_id, bool enable);
981 1079
982/* intel_sdvo.c */ 1080/* intel_sdvo.c */
983bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); 1081bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 3fd082933c87..bfcefbf33709 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -658,7 +658,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
658 .fill_modes = drm_helper_probe_single_connector_modes, 658 .fill_modes = drm_helper_probe_single_connector_modes,
659}; 659};
660 660
661bool intel_dsi_init(struct drm_device *dev) 661void intel_dsi_init(struct drm_device *dev)
662{ 662{
663 struct intel_dsi *intel_dsi; 663 struct intel_dsi *intel_dsi;
664 struct intel_encoder *intel_encoder; 664 struct intel_encoder *intel_encoder;
@@ -674,29 +674,29 @@ bool intel_dsi_init(struct drm_device *dev)
674 674
675 /* There is no detection method for MIPI so rely on VBT */ 675 /* There is no detection method for MIPI so rely on VBT */
676 if (!dev_priv->vbt.has_mipi) 676 if (!dev_priv->vbt.has_mipi)
677 return false; 677 return;
678
679 if (IS_VALLEYVIEW(dev)) {
680 dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
681 } else {
682 DRM_ERROR("Unsupported MIPI device, no register base known\n");
683 return;
684 }
678 685
679 intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); 686 intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
680 if (!intel_dsi) 687 if (!intel_dsi)
681 return false; 688 return;
682 689
683 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); 690 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
684 if (!intel_connector) { 691 if (!intel_connector) {
685 kfree(intel_dsi); 692 kfree(intel_dsi);
686 return false; 693 return;
687 } 694 }
688 695
689 intel_encoder = &intel_dsi->base; 696 intel_encoder = &intel_dsi->base;
690 encoder = &intel_encoder->base; 697 encoder = &intel_encoder->base;
691 intel_dsi->attached_connector = intel_connector; 698 intel_dsi->attached_connector = intel_connector;
692 699
693 if (IS_VALLEYVIEW(dev)) {
694 dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
695 } else {
696 DRM_ERROR("Unsupported Mipi device to reg base");
697 return false;
698 }
699
700 connector = &intel_connector->base; 700 connector = &intel_connector->base;
701 701
702 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); 702 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -743,7 +743,7 @@ bool intel_dsi_init(struct drm_device *dev)
743 743
744 intel_connector_attach_encoder(intel_connector, intel_encoder); 744 intel_connector_attach_encoder(intel_connector, intel_encoder);
745 745
746 drm_sysfs_connector_add(connector); 746 drm_connector_register(connector);
747 747
748 fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev); 748 fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
749 if (!fixed_mode) { 749 if (!fixed_mode) {
@@ -754,12 +754,10 @@ bool intel_dsi_init(struct drm_device *dev)
754 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 754 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
755 intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 755 intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
756 756
757 return true; 757 return;
758 758
759err: 759err:
760 drm_encoder_cleanup(&intel_encoder->base); 760 drm_encoder_cleanup(&intel_encoder->base);
761 kfree(intel_dsi); 761 kfree(intel_dsi);
762 kfree(intel_connector); 762 kfree(intel_connector);
763
764 return false;
765} 763}
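[ Ed. Besides changing the return type to void, the reordering in this hunk
fixes a leak: the old placement of the IS_VALLEYVIEW() check returned after
the two kzalloc() calls without freeing either object. A sketch of the
check-before-allocate pattern the new code adopts; example_supported() is
hypothetical: ]

    static void example_init(struct drm_device *dev)
    {
            struct intel_dsi *intel_dsi;

            if (!example_supported(dev))    /* cheap precondition first */
                    return;                 /* nothing to unwind */

            intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
            if (!intel_dsi)
                    return;
            /* ... every later error path must kfree(intel_dsi) ... */
    }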
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 21a0d348cedc..47c7584a4aa0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
143 case MIPI_DSI_DCS_LONG_WRITE: 143 case MIPI_DSI_DCS_LONG_WRITE:
144 dsi_vc_dcs_write(intel_dsi, vc, data, len); 144 dsi_vc_dcs_write(intel_dsi, vc, data, len);
145 break; 145 break;
146 }; 146 }
147 147
148 data += len; 148 data += len;
149 149
@@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
294 intel_dsi->rst_timer_val = mipi_config->device_reset_timer; 294 intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
295 intel_dsi->init_count = mipi_config->master_init_timer; 295 intel_dsi->init_count = mipi_config->master_init_timer;
296 intel_dsi->bw_timer = mipi_config->dbi_bw_timer; 296 intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
297 intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; 297 intel_dsi->video_frmt_cfg_bits =
298 mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
298 299
299 switch (intel_dsi->escape_clk_div) { 300 switch (intel_dsi->escape_clk_div) {
300 case 0: 301 case 0:
@@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
351 * 352 *
352 * prepare count 353 * prepare count
353 */ 354 */
354 ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare); 355 ths_prepare_ns = max(mipi_config->ths_prepare,
356 mipi_config->tclk_prepare);
355 prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2); 357 prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
356 358
357 /* exit zero count */ 359 /* exit zero count */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a3631c0a5c28..56b47d2ffaf7 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
112 112
113static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) 113static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
114{ 114{
115 struct drm_device *dev = connector->base.dev;
116 struct drm_i915_private *dev_priv = dev->dev_private;
115 struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); 117 struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
118 u32 tmp;
119
120 tmp = I915_READ(intel_dvo->dev.dvo_reg);
121
122 if (!(tmp & DVO_ENABLE))
123 return false;
116 124
117 return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev); 125 return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
118} 126}
@@ -558,7 +566,7 @@ void intel_dvo_init(struct drm_device *dev)
558 intel_dvo->panel_wants_dither = true; 566 intel_dvo->panel_wants_dither = true;
559 } 567 }
560 568
561 drm_sysfs_connector_add(connector); 569 drm_connector_register(connector);
562 return; 570 return;
563 } 571 }
564 572
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 088fe9378a4c..f475414671d8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -43,10 +43,36 @@
43#include <drm/i915_drm.h> 43#include <drm/i915_drm.h>
44#include "i915_drv.h" 44#include "i915_drv.h"
45 45
46static int intel_fbdev_set_par(struct fb_info *info)
47{
48 struct drm_fb_helper *fb_helper = info->par;
49 struct intel_fbdev *ifbdev =
50 container_of(fb_helper, struct intel_fbdev, helper);
51 int ret;
52
53 ret = drm_fb_helper_set_par(info);
54
55 if (ret == 0) {
56 /*
57 * FIXME: fbdev presumes that all callbacks also work from
58 * atomic contexts and relies on that for emergency oops
59 * printing. KMS totally doesn't do that and the locking here is
60 * by far not the only place this goes wrong. Ignore this for
61 * now until we solve this for real.
62 */
63 mutex_lock(&fb_helper->dev->struct_mutex);
64 ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
65 true);
66 mutex_unlock(&fb_helper->dev->struct_mutex);
67 }
68
69 return ret;
70}
71
46static struct fb_ops intelfb_ops = { 72static struct fb_ops intelfb_ops = {
47 .owner = THIS_MODULE, 73 .owner = THIS_MODULE,
48 .fb_check_var = drm_fb_helper_check_var, 74 .fb_check_var = drm_fb_helper_check_var,
49 .fb_set_par = drm_fb_helper_set_par, 75 .fb_set_par = intel_fbdev_set_par,
50 .fb_fillrect = cfb_fillrect, 76 .fb_fillrect = cfb_fillrect,
51 .fb_copyarea = cfb_copyarea, 77 .fb_copyarea = cfb_copyarea,
52 .fb_imageblit = cfb_imageblit, 78 .fb_imageblit = cfb_imageblit,
@@ -81,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
81 sizes->surface_depth); 107 sizes->surface_depth);
82 108
83 size = mode_cmd.pitches[0] * mode_cmd.height; 109 size = mode_cmd.pitches[0] * mode_cmd.height;
84 size = ALIGN(size, PAGE_SIZE); 110 size = PAGE_ALIGN(size);
85 obj = i915_gem_object_create_stolen(dev, size); 111 obj = i915_gem_object_create_stolen(dev, size);
86 if (obj == NULL) 112 if (obj == NULL)
87 obj = i915_gem_alloc_object(dev, size); 113 obj = i915_gem_alloc_object(dev, size);
@@ -417,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
417 } 443 }
418 crtcs[i] = new_crtc; 444 crtcs[i] = new_crtc;
419 445
420 DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n", 446 DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
421 connector->name, 447 connector->name,
422 pipe_name(to_intel_crtc(encoder->crtc)->pipe), 448 pipe_name(to_intel_crtc(encoder->crtc)->pipe),
423 encoder->crtc->base.id, 449 encoder->crtc->base.id,
@@ -452,7 +478,7 @@ out:
452 return true; 478 return true;
453} 479}
454 480
455static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 481static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
456 .initial_config = intel_fb_initial_config, 482 .initial_config = intel_fb_initial_config,
457 .gamma_set = intel_crtc_fb_gamma_set, 483 .gamma_set = intel_crtc_fb_gamma_set,
458 .gamma_get = intel_crtc_fb_gamma_get, 484 .gamma_get = intel_crtc_fb_gamma_get,
@@ -623,7 +649,8 @@ int intel_fbdev_init(struct drm_device *dev)
623 if (ifbdev == NULL) 649 if (ifbdev == NULL)
624 return -ENOMEM; 650 return -ENOMEM;
625 651
626 ifbdev->helper.funcs = &intel_fb_helper_funcs; 652 drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
653
627 if (!intel_fbdev_init_bios(dev, ifbdev)) 654 if (!intel_fbdev_init_bios(dev, ifbdev))
628 ifbdev->preferred_bpp = 32; 655 ifbdev->preferred_bpp = 32;
629 656
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index eee2bbec2958..f9151f6641d9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -367,6 +367,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
367 union hdmi_infoframe frame; 367 union hdmi_infoframe frame;
368 int ret; 368 int ret;
369 369
370 /* Propagate the user-selected picture aspect ratio (PAR) into the mode */
371 adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
372
370 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 373 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
371 adjusted_mode); 374 adjusted_mode);
372 if (ret < 0) { 375 if (ret < 0) {
@@ -879,7 +882,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
879 struct intel_encoder *encoder; 882 struct intel_encoder *encoder;
880 int count = 0, count_hdmi = 0; 883 int count = 0, count_hdmi = 0;
881 884
882 if (!HAS_PCH_SPLIT(dev)) 885 if (HAS_GMCH_DISPLAY(dev))
883 return false; 886 return false;
884 887
885 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 888 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
@@ -1124,6 +1127,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
1124 goto done; 1127 goto done;
1125 } 1128 }
1126 1129
1130 if (property == connector->dev->mode_config.aspect_ratio_property) {
1131 switch (val) {
1132 case DRM_MODE_PICTURE_ASPECT_NONE:
1133 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
1134 break;
1135 case DRM_MODE_PICTURE_ASPECT_4_3:
1136 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
1137 break;
1138 case DRM_MODE_PICTURE_ASPECT_16_9:
1139 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
1140 break;
1141 default:
1142 return -EINVAL;
1143 }
1144 goto done;
1145 }
1146
1127 return -EINVAL; 1147 return -EINVAL;
1128 1148
1129done: 1149done:
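[ Ed. With the property wired up, userspace can select the advertised aspect
ratio through the standard property interface. A hedged libdrm sketch; the
property id must be looked up by name first, and the name is assumed to be
"aspect ratio" as created by drm_mode_create_aspect_ratio_property(): ]

    #include <stdint.h>
    #include <xf86drmMode.h>

    #ifndef DRM_MODE_PICTURE_ASPECT_16_9
    #define DRM_MODE_PICTURE_ASPECT_16_9 2  /* uapi enum value assumed */
    #endif

    /* prop_id: id of the connector's "aspect ratio" property. */
    static int example_set_aspect_16_9(int fd, uint32_t connector_id,
                                       uint32_t prop_id)
    {
            return drmModeObjectSetProperty(fd, connector_id,
                                            DRM_MODE_OBJECT_CONNECTOR,
                                            prop_id,
                                            DRM_MODE_PICTURE_ASPECT_16_9);
    }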
@@ -1229,6 +1249,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1229 mutex_unlock(&dev_priv->dpio_lock); 1249 mutex_unlock(&dev_priv->dpio_lock);
1230} 1250}
1231 1251
1252static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1253{
1254 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1255 struct drm_device *dev = encoder->base.dev;
1256 struct drm_i915_private *dev_priv = dev->dev_private;
1257 struct intel_crtc *intel_crtc =
1258 to_intel_crtc(encoder->base.crtc);
1259 enum dpio_channel ch = vlv_dport_to_channel(dport);
1260 enum pipe pipe = intel_crtc->pipe;
1261 u32 val;
1262
1263 mutex_lock(&dev_priv->dpio_lock);
1264
1265 /* program left/right clock distribution */
1266 if (pipe != PIPE_B) {
1267 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1268 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1269 if (ch == DPIO_CH0)
1270 val |= CHV_BUFLEFTENA1_FORCE;
1271 if (ch == DPIO_CH1)
1272 val |= CHV_BUFRIGHTENA1_FORCE;
1273 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1274 } else {
1275 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1276 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1277 if (ch == DPIO_CH0)
1278 val |= CHV_BUFLEFTENA2_FORCE;
1279 if (ch == DPIO_CH1)
1280 val |= CHV_BUFRIGHTENA2_FORCE;
1281 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1282 }
1283
1284 /* program clock channel usage */
1285 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
1286 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1287 if (pipe != PIPE_B)
1288 val &= ~CHV_PCS_USEDCLKCHANNEL;
1289 else
1290 val |= CHV_PCS_USEDCLKCHANNEL;
1291 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
1292
1293 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
1294 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1295 if (pipe != PIPE_B)
1296 val &= ~CHV_PCS_USEDCLKCHANNEL;
1297 else
1298 val |= CHV_PCS_USEDCLKCHANNEL;
1299 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
1300
1301 /*
1302 * This is a bit weird since generally CL
1303 * matches the pipe, but here we need to
1304 * pick the CL based on the port.
1305 */
1306 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
1307 if (pipe != PIPE_B)
1308 val &= ~CHV_CMN_USEDCLKCHANNEL;
1309 else
1310 val |= CHV_CMN_USEDCLKCHANNEL;
1311 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
1312
1313 mutex_unlock(&dev_priv->dpio_lock);
1314}
1315
1232static void vlv_hdmi_post_disable(struct intel_encoder *encoder) 1316static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1233{ 1317{
1234 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1318 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1416,11 +1500,22 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
1416}; 1500};
1417 1501
1418static void 1502static void
1503intel_attach_aspect_ratio_property(struct drm_connector *connector)
1504{
1505 if (!drm_mode_create_aspect_ratio_property(connector->dev))
1506 drm_object_attach_property(&connector->base,
1507 connector->dev->mode_config.aspect_ratio_property,
1508 DRM_MODE_PICTURE_ASPECT_NONE);
1509}
1510
1511static void
1419intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 1512intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
1420{ 1513{
1421 intel_attach_force_audio_property(connector); 1514 intel_attach_force_audio_property(connector);
1422 intel_attach_broadcast_rgb_property(connector); 1515 intel_attach_broadcast_rgb_property(connector);
1423 intel_hdmi->color_range_auto = true; 1516 intel_hdmi->color_range_auto = true;
1517 intel_attach_aspect_ratio_property(connector);
1518 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
1424} 1519}
1425 1520
1426void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, 1521void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1467,7 +1562,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1467 if (IS_VALLEYVIEW(dev)) { 1562 if (IS_VALLEYVIEW(dev)) {
1468 intel_hdmi->write_infoframe = vlv_write_infoframe; 1563 intel_hdmi->write_infoframe = vlv_write_infoframe;
1469 intel_hdmi->set_infoframes = vlv_set_infoframes; 1564 intel_hdmi->set_infoframes = vlv_set_infoframes;
1470 } else if (!HAS_PCH_SPLIT(dev)) { 1565 } else if (IS_G4X(dev)) {
1471 intel_hdmi->write_infoframe = g4x_write_infoframe; 1566 intel_hdmi->write_infoframe = g4x_write_infoframe;
1472 intel_hdmi->set_infoframes = g4x_set_infoframes; 1567 intel_hdmi->set_infoframes = g4x_set_infoframes;
1473 } else if (HAS_DDI(dev)) { 1568 } else if (HAS_DDI(dev)) {
@@ -1490,7 +1585,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1490 intel_hdmi_add_properties(intel_hdmi, connector); 1585 intel_hdmi_add_properties(intel_hdmi, connector);
1491 1586
1492 intel_connector_attach_encoder(intel_connector, intel_encoder); 1587 intel_connector_attach_encoder(intel_connector, intel_encoder);
1493 drm_sysfs_connector_add(connector); 1588 drm_connector_register(connector);
1494 1589
1495 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 1590 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
1496 * 0xd. Failure to do so will result in spurious interrupts being 1591 * 0xd. Failure to do so will result in spurious interrupts being
@@ -1528,6 +1623,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1528 intel_encoder->get_hw_state = intel_hdmi_get_hw_state; 1623 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1529 intel_encoder->get_config = intel_hdmi_get_config; 1624 intel_encoder->get_config = intel_hdmi_get_config;
1530 if (IS_CHERRYVIEW(dev)) { 1625 if (IS_CHERRYVIEW(dev)) {
1626 intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
1531 intel_encoder->pre_enable = chv_hdmi_pre_enable; 1627 intel_encoder->pre_enable = chv_hdmi_pre_enable;
1532 intel_encoder->enable = vlv_enable_hdmi; 1628 intel_encoder->enable = vlv_enable_hdmi;
1533 intel_encoder->post_disable = chv_hdmi_post_disable; 1629 intel_encoder->post_disable = chv_hdmi_post_disable;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d33b61d0dd33..b31088a551f2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,11 +34,6 @@
34#include <drm/i915_drm.h> 34#include <drm/i915_drm.h>
35#include "i915_drv.h" 35#include "i915_drv.h"
36 36
37enum disp_clk {
38 CDCLK,
39 CZCLK
40};
41
42struct gmbus_port { 37struct gmbus_port {
43 const char *name; 38 const char *name;
44 int reg; 39 int reg;
@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c)
63 return container_of(i2c, struct intel_gmbus, adapter); 58 return container_of(i2c, struct intel_gmbus, adapter);
64} 59}
65 60
66static int get_disp_clk_div(struct drm_i915_private *dev_priv,
67 enum disp_clk clk)
68{
69 u32 reg_val;
70 int clk_ratio;
71
72 reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
73
74 if (clk == CDCLK)
75 clk_ratio =
76 ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
77 else
78 clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
79
80 return clk_ratio;
81}
82
83static void gmbus_set_freq(struct drm_i915_private *dev_priv)
84{
85 int vco, gmbus_freq = 0, cdclk_div;
86
87 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
88
89 vco = valleyview_get_vco(dev_priv);
90
91 /* Get the CDCLK divide ratio */
92 cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
93
94 /*
95 * Program the gmbus_freq based on the cdclk frequency.
96 * BSpec erroneously claims we should aim for 4MHz, but
97 * in fact 1MHz is the correct frequency.
98 */
99 if (cdclk_div)
100 gmbus_freq = (vco << 1) / cdclk_div;
101
102 if (WARN_ON(gmbus_freq == 0))
103 return;
104
105 I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
106}
107
108void 61void
109intel_i2c_reset(struct drm_device *dev) 62intel_i2c_reset(struct drm_device *dev)
110{ 63{
111 struct drm_i915_private *dev_priv = dev->dev_private; 64 struct drm_i915_private *dev_priv = dev->dev_private;
112 65
113 /*
114 * In BIOS-less system, program the correct gmbus frequency
115 * before reading edid.
116 */
117 if (IS_VALLEYVIEW(dev))
118 gmbus_set_freq(dev_priv);
119
120 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
121 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); 67 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
122} 68}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 5e5a72fca5fb..881361c0f27e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,6 +51,7 @@ struct intel_lvds_encoder {
51 51
52 bool is_dual_link; 52 bool is_dual_link;
53 u32 reg; 53 u32 reg;
54 u32 a3_power;
54 55
55 struct intel_lvds_connector *attached_connector; 56 struct intel_lvds_connector *attached_connector;
56}; 57};
@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
71 struct drm_device *dev = encoder->base.dev; 72 struct drm_device *dev = encoder->base.dev;
72 struct drm_i915_private *dev_priv = dev->dev_private; 73 struct drm_i915_private *dev_priv = dev->dev_private;
73 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 74 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
75 enum intel_display_power_domain power_domain;
74 u32 tmp; 76 u32 tmp;
75 77
78 power_domain = intel_display_port_power_domain(encoder);
79 if (!intel_display_power_enabled(dev_priv, power_domain))
80 return false;
81
76 tmp = I915_READ(lvds_encoder->reg); 82 tmp = I915_READ(lvds_encoder->reg);
77 83
78 if (!(tmp & LVDS_PORT_EN)) 84 if (!(tmp & LVDS_PORT_EN))
@@ -172,8 +178,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
172 178
173 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 179 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
174 * appropriately here, but we need to look more thoroughly into how 180 * appropriately here, but we need to look more thoroughly into how
175 * panels behave in the two modes. 181 * panels behave in the two modes. For now, let's just maintain the
182 * value we got from the BIOS.
176 */ 183 */
184 temp &= ~LVDS_A3_POWER_MASK;
185 temp |= lvds_encoder->a3_power;
177 186
178 /* Set the dithering flag on LVDS as needed, note that there is no 187 /* Set the dithering flag on LVDS as needed, note that there is no
179 * special lvds dither control bit on pch-split platforms, dithering is 188 * special lvds dither control bit on pch-split platforms, dithering is
@@ -271,7 +280,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
271 struct intel_crtc_config *pipe_config) 280 struct intel_crtc_config *pipe_config)
272{ 281{
273 struct drm_device *dev = intel_encoder->base.dev; 282 struct drm_device *dev = intel_encoder->base.dev;
274 struct drm_i915_private *dev_priv = dev->dev_private;
275 struct intel_lvds_encoder *lvds_encoder = 283 struct intel_lvds_encoder *lvds_encoder =
276 to_lvds_encoder(&intel_encoder->base); 284 to_lvds_encoder(&intel_encoder->base);
277 struct intel_connector *intel_connector = 285 struct intel_connector *intel_connector =
@@ -286,8 +294,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
286 return false; 294 return false;
287 } 295 }
288 296
289 if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) == 297 if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
290 LVDS_A3_POWER_UP)
291 lvds_bpp = 8*3; 298 lvds_bpp = 8*3;
292 else 299 else
293 lvds_bpp = 6*3; 300 lvds_bpp = 6*3;
@@ -1088,6 +1095,9 @@ out:
1088 DRM_DEBUG_KMS("detected %s-link lvds configuration\n", 1095 DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
1089 lvds_encoder->is_dual_link ? "dual" : "single"); 1096 lvds_encoder->is_dual_link ? "dual" : "single");
1090 1097
1098 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
1099 LVDS_A3_POWER_MASK;
1100
1091 /* 1101 /*
1092 * Unlock registers and just 1102 * Unlock registers and just
1093 * leave them unlocked 1103 * leave them unlocked
@@ -1104,7 +1114,7 @@ out:
1104 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1114 DRM_DEBUG_KMS("lid notifier registration failed\n");
1105 lvds_connector->lid_notifier.notifier_call = NULL; 1115 lvds_connector->lid_notifier.notifier_call = NULL;
1106 } 1116 }
1107 drm_sysfs_connector_add(connector); 1117 drm_connector_register(connector);
1108 1118
1109 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 1119 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
1110 intel_panel_setup_backlight(connector); 1120 intel_panel_setup_backlight(connector);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4f6b53998d79..ca52ad2ae7d1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -352,6 +352,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
352 case INTEL_OUTPUT_UNKNOWN: 352 case INTEL_OUTPUT_UNKNOWN:
353 case INTEL_OUTPUT_DISPLAYPORT: 353 case INTEL_OUTPUT_DISPLAYPORT:
354 case INTEL_OUTPUT_HDMI: 354 case INTEL_OUTPUT_HDMI:
355 case INTEL_OUTPUT_DP_MST:
355 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; 356 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
356 break; 357 break;
357 case INTEL_OUTPUT_EDP: 358 case INTEL_OUTPUT_EDP:
@@ -427,7 +428,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
427 */ 428 */
428 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); 429 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
429 list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) 430 list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
430 intel_panel_set_backlight(intel_connector, bclp, 255); 431 intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
431 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 432 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
432 433
433 drm_modeset_unlock(&dev->mode_config.connection_mutex); 434 drm_modeset_unlock(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index daa118978eec..dc2f4f26c961 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
415 } 415 }
416 416
417 intel_overlay_release_old_vid_tail(overlay); 417 intel_overlay_release_old_vid_tail(overlay);
418
419
420 i915_gem_track_fb(overlay->old_vid_bo, NULL,
421 INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
418 return 0; 422 return 0;
419} 423}
420 424
@@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
686 bool scale_changed = false; 690 bool scale_changed = false;
687 struct drm_device *dev = overlay->dev; 691 struct drm_device *dev = overlay->dev;
688 u32 swidth, swidthsw, sheight, ostride; 692 u32 swidth, swidthsw, sheight, ostride;
693 enum pipe pipe = overlay->crtc->pipe;
689 694
690 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 695 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
691 BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 696 BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
713 oconfig = OCONF_CC_OUT_8BIT; 718 oconfig = OCONF_CC_OUT_8BIT;
714 if (IS_GEN4(overlay->dev)) 719 if (IS_GEN4(overlay->dev))
715 oconfig |= OCONF_CSC_MODE_BT709; 720 oconfig |= OCONF_CSC_MODE_BT709;
716 oconfig |= overlay->crtc->pipe == 0 ? 721 oconfig |= pipe == 0 ?
717 OCONF_PIPE_A : OCONF_PIPE_B; 722 OCONF_PIPE_A : OCONF_PIPE_B;
718 iowrite32(oconfig, &regs->OCONFIG); 723 iowrite32(oconfig, &regs->OCONFIG);
719 intel_overlay_unmap_regs(overlay, regs); 724 intel_overlay_unmap_regs(overlay, regs);
@@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
776 if (ret) 781 if (ret)
777 goto out_unpin; 782 goto out_unpin;
778 783
784 i915_gem_track_fb(overlay->vid_bo, new_bo,
785 INTEL_FRONTBUFFER_OVERLAY(pipe));
786
779 overlay->old_vid_bo = overlay->vid_bo; 787 overlay->old_vid_bo = overlay->vid_bo;
780 overlay->vid_bo = new_bo; 788 overlay->vid_bo = new_bo;
781 789
790 intel_frontbuffer_flip(dev,
791 INTEL_FRONTBUFFER_OVERLAY(pipe));
792
782 return 0; 793 return 0;
783 794
784out_unpin: 795out_unpin:
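[ Ed. The two hunks above illustrate the tracking contract this series
introduces: frontbuffer bits migrate with the scanout object, and a flip
notification re-arms consumers such as PSR and FBC. A condensed sketch,
names taken from the hunks, calling context assumed: ]

    static void example_swap_scanout(struct drm_device *dev,
                                     struct drm_i915_gem_object *old_bo,
                                     struct drm_i915_gem_object *new_bo,
                                     unsigned frontbuffer_bits)
    {
            i915_gem_track_fb(old_bo, new_bo, frontbuffer_bits);
            intel_frontbuffer_flip(dev, frontbuffer_bits);
    }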
@@ -1028,7 +1039,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1028 struct drm_intel_overlay_put_image *put_image_rec = data; 1039 struct drm_intel_overlay_put_image *put_image_rec = data;
1029 struct drm_i915_private *dev_priv = dev->dev_private; 1040 struct drm_i915_private *dev_priv = dev->dev_private;
1030 struct intel_overlay *overlay; 1041 struct intel_overlay *overlay;
1031 struct drm_mode_object *drmmode_obj; 1042 struct drm_crtc *drmmode_crtc;
1032 struct intel_crtc *crtc; 1043 struct intel_crtc *crtc;
1033 struct drm_i915_gem_object *new_bo; 1044 struct drm_i915_gem_object *new_bo;
1034 struct put_image_params *params; 1045 struct put_image_params *params;
@@ -1057,13 +1068,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1057 if (!params) 1068 if (!params)
1058 return -ENOMEM; 1069 return -ENOMEM;
1059 1070
1060 drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, 1071 drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id);
1061 DRM_MODE_OBJECT_CRTC); 1072 if (!drmmode_crtc) {
1062 if (!drmmode_obj) {
1063 ret = -ENOENT; 1073 ret = -ENOENT;
1064 goto out_free; 1074 goto out_free;
1065 } 1075 }
1066 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); 1076 crtc = to_intel_crtc(drmmode_crtc);
1067 1077
1068 new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, 1078 new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
1069 put_image_rec->bo_handle)); 1079 put_image_rec->bo_handle));
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 12b02fe1d0ae..59b028f0b1e8 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -398,6 +398,69 @@ intel_panel_detect(struct drm_device *dev)
398 } 398 }
399} 399}
400 400
401/**
402 * scale - scale values from one range to another
403 *
404 * @source_val: value in range [@source_min..@source_max]
405 *
406 * Return @source_val in range [@source_min..@source_max] scaled to range
407 * [@target_min..@target_max].
408 */
409static uint32_t scale(uint32_t source_val,
410 uint32_t source_min, uint32_t source_max,
411 uint32_t target_min, uint32_t target_max)
412{
413 uint64_t target_val;
414
415 WARN_ON(source_min > source_max);
416 WARN_ON(target_min > target_max);
417
418 /* defensive */
419 source_val = clamp(source_val, source_min, source_max);
420
421 /* avoid overflows */
422 target_val = (uint64_t)(source_val - source_min) *
423 (target_max - target_min);
424 do_div(target_val, source_max - source_min);
425 target_val += target_min;
426
427 return target_val;
428}
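[ Ed. A worked example of the arithmetic above, with assumed values: user
level 200 on a 0..255 scale, mapped onto hardware PWM limits 40..120: ]

    /*
     * scale(200, 0, 255, 40, 120):
     *   (200 - 0) * (120 - 40) = 16000
     *   16000 / (255 - 0)      = 62      (do_div truncates)
     *   62 + 40                = 102     -> value handed to the PWM
     */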
429
430/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
431static inline u32 scale_user_to_hw(struct intel_connector *connector,
432 u32 user_level, u32 user_max)
433{
434 struct intel_panel *panel = &connector->panel;
435
436 return scale(user_level, 0, user_max,
437 panel->backlight.min, panel->backlight.max);
438}
439
440/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
441 * to [hw_min..hw_max]. */
442static inline u32 clamp_user_to_hw(struct intel_connector *connector,
443 u32 user_level, u32 user_max)
444{
445 struct intel_panel *panel = &connector->panel;
446 u32 hw_level;
447
448 hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
449 hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
450
451 return hw_level;
452}
453
454/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
455static inline u32 scale_hw_to_user(struct intel_connector *connector,
456 u32 hw_level, u32 user_max)
457{
458 struct intel_panel *panel = &connector->panel;
459
460 return scale(hw_level, panel->backlight.min, panel->backlight.max,
461 0, user_max);
462}
463
401static u32 intel_panel_compute_brightness(struct intel_connector *connector, 464static u32 intel_panel_compute_brightness(struct intel_connector *connector,
402 u32 val) 465 u32 val)
403{ 466{
@@ -557,17 +620,16 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
557 dev_priv->display.set_backlight(connector, level); 620 dev_priv->display.set_backlight(connector, level);
558} 621}
559 622
560/* set backlight brightness to level in range [0..max] */ 623/* set backlight brightness to level in range [0..max], scaling wrt hw min */
561void intel_panel_set_backlight(struct intel_connector *connector, u32 level, 624static void intel_panel_set_backlight(struct intel_connector *connector,
562 u32 max) 625 u32 user_level, u32 user_max)
563{ 626{
564 struct drm_device *dev = connector->base.dev; 627 struct drm_device *dev = connector->base.dev;
565 struct drm_i915_private *dev_priv = dev->dev_private; 628 struct drm_i915_private *dev_priv = dev->dev_private;
566 struct intel_panel *panel = &connector->panel; 629 struct intel_panel *panel = &connector->panel;
567 enum pipe pipe = intel_get_pipe_from_connector(connector); 630 enum pipe pipe = intel_get_pipe_from_connector(connector);
568 u32 freq; 631 u32 hw_level;
569 unsigned long flags; 632 unsigned long flags;
570 u64 n;
571 633
572 if (!panel->backlight.present || pipe == INVALID_PIPE) 634 if (!panel->backlight.present || pipe == INVALID_PIPE)
573 return; 635 return;
@@ -576,18 +638,46 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
576 638
577 WARN_ON(panel->backlight.max == 0); 639 WARN_ON(panel->backlight.max == 0);
578 640
579 /* scale to hardware max, but be careful to not overflow */ 641 hw_level = scale_user_to_hw(connector, user_level, user_max);
580 freq = panel->backlight.max; 642 panel->backlight.level = hw_level;
581 n = (u64)level * freq; 643
582 do_div(n, max); 644 if (panel->backlight.enabled)
583 level = n; 645 intel_panel_actually_set_backlight(connector, hw_level);
646
647 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
648}
649
650/* set backlight brightness to level in range [0..max], assuming hw min is
651 * respected.
652 */
653void intel_panel_set_backlight_acpi(struct intel_connector *connector,
654 u32 user_level, u32 user_max)
655{
656 struct drm_device *dev = connector->base.dev;
657 struct drm_i915_private *dev_priv = dev->dev_private;
658 struct intel_panel *panel = &connector->panel;
659 enum pipe pipe = intel_get_pipe_from_connector(connector);
660 u32 hw_level;
661 unsigned long flags;
662
663 if (!panel->backlight.present || pipe == INVALID_PIPE)
664 return;
665
666 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
667
668 WARN_ON(panel->backlight.max == 0);
669
670 hw_level = clamp_user_to_hw(connector, user_level, user_max);
671 panel->backlight.level = hw_level;
584 672
585 panel->backlight.level = level;
586 if (panel->backlight.device) 673 if (panel->backlight.device)
587 panel->backlight.device->props.brightness = level; 674 panel->backlight.device->props.brightness =
675 scale_hw_to_user(connector,
676 panel->backlight.level,
677 panel->backlight.device->props.max_brightness);
588 678
589 if (panel->backlight.enabled) 679 if (panel->backlight.enabled)
590 intel_panel_actually_set_backlight(connector, level); 680 intel_panel_actually_set_backlight(connector, hw_level);
591 681
592 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 682 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
593} 683}
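[ Ed. The split above gives the sysfs/user path and the ACPI path different
semantics near the bottom of the range. A worked example with assumed
limits: hw min 40, hw max 120, user range 0..255, user level 10: ]

    /*
     * intel_panel_set_backlight() (scale_user_to_hw):
     *   40 + 10 * (120 - 40) / 255 = 40 + 3 = 43   (min maps smoothly)
     *
     * intel_panel_set_backlight_acpi() (clamp_user_to_hw):
     *   10 * 120 / 255 = 4, clamped to [40..120]   -> 40
     *   (absolute proportions preserved, then cut off at hw min)
     */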
@@ -860,7 +950,9 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
860 panel->backlight.level = panel->backlight.max; 950 panel->backlight.level = panel->backlight.max;
861 if (panel->backlight.device) 951 if (panel->backlight.device)
862 panel->backlight.device->props.brightness = 952 panel->backlight.device->props.brightness =
863 panel->backlight.level; 953 scale_hw_to_user(connector,
954 panel->backlight.level,
955 panel->backlight.device->props.max_brightness);
864 } 956 }
865 957
866 dev_priv->display.enable_backlight(connector); 958 dev_priv->display.enable_backlight(connector);
@@ -889,11 +981,15 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
889 struct intel_connector *connector = bl_get_data(bd); 981 struct intel_connector *connector = bl_get_data(bd);
890 struct drm_device *dev = connector->base.dev; 982 struct drm_device *dev = connector->base.dev;
891 struct drm_i915_private *dev_priv = dev->dev_private; 983 struct drm_i915_private *dev_priv = dev->dev_private;
984 u32 hw_level;
892 int ret; 985 int ret;
893 986
894 intel_runtime_pm_get(dev_priv); 987 intel_runtime_pm_get(dev_priv);
895 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 988 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
896 ret = intel_panel_get_backlight(connector); 989
990 hw_level = intel_panel_get_backlight(connector);
991 ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
992
897 drm_modeset_unlock(&dev->mode_config.connection_mutex); 993 drm_modeset_unlock(&dev->mode_config.connection_mutex);
898 intel_runtime_pm_put(dev_priv); 994 intel_runtime_pm_put(dev_priv);
899 995
@@ -913,12 +1009,19 @@ static int intel_backlight_device_register(struct intel_connector *connector)
913 if (WARN_ON(panel->backlight.device)) 1009 if (WARN_ON(panel->backlight.device))
914 return -ENODEV; 1010 return -ENODEV;
915 1011
916 BUG_ON(panel->backlight.max == 0); 1012 WARN_ON(panel->backlight.max == 0);
917 1013
918 memset(&props, 0, sizeof(props)); 1014 memset(&props, 0, sizeof(props));
919 props.type = BACKLIGHT_RAW; 1015 props.type = BACKLIGHT_RAW;
920 props.brightness = panel->backlight.level; 1016
1017 /*
1018 * Note: Everything should work even if the backlight device max
1019 * presented to userspace is arbitrarily chosen.
1020 */
921 props.max_brightness = panel->backlight.max; 1021 props.max_brightness = panel->backlight.max;
1022 props.brightness = scale_hw_to_user(connector,
1023 panel->backlight.level,
1024 props.max_brightness);
922 1025
923 /* 1026 /*
924 * Note: using the same name independent of the connector prevents 1027 * Note: using the same name independent of the connector prevents
@@ -964,6 +1067,19 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
964 * XXX: Query mode clock or hardware clock and program PWM modulation frequency 1067 * XXX: Query mode clock or hardware clock and program PWM modulation frequency
965 * appropriately when it's 0. Use VBT and/or sane defaults. 1068 * appropriately when it's 0. Use VBT and/or sane defaults.
966 */ 1069 */
1070static u32 get_backlight_min_vbt(struct intel_connector *connector)
1071{
1072 struct drm_device *dev = connector->base.dev;
1073 struct drm_i915_private *dev_priv = dev->dev_private;
1074 struct intel_panel *panel = &connector->panel;
1075
1076 WARN_ON(panel->backlight.max == 0);
1077
1078 /* vbt value is a coefficient in range [0..255] */
1079 return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
1080 0, panel->backlight.max);
1081}
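[ Ed. A worked example for the VBT minimum, with assumed values: a VBT
min_brightness coefficient of 25 and a panel PWM max of 937: ]

    /*
     * scale(25, 0, 255, 0, 937) = 25 * 937 / 255 = 91
     * -> backlight.min = 91, roughly 9.7% of the PWM range.
     */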
1082
967static int bdw_setup_backlight(struct intel_connector *connector) 1083static int bdw_setup_backlight(struct intel_connector *connector)
968{ 1084{
969 struct drm_device *dev = connector->base.dev; 1085 struct drm_device *dev = connector->base.dev;
@@ -979,6 +1095,8 @@ static int bdw_setup_backlight(struct intel_connector *connector)
979 if (!panel->backlight.max) 1095 if (!panel->backlight.max)
980 return -ENODEV; 1096 return -ENODEV;
981 1097
1098 panel->backlight.min = get_backlight_min_vbt(connector);
1099
982 val = bdw_get_backlight(connector); 1100 val = bdw_get_backlight(connector);
983 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1101 panel->backlight.level = intel_panel_compute_brightness(connector, val);
984 1102
@@ -1003,6 +1121,8 @@ static int pch_setup_backlight(struct intel_connector *connector)
1003 if (!panel->backlight.max) 1121 if (!panel->backlight.max)
1004 return -ENODEV; 1122 return -ENODEV;
1005 1123
1124 panel->backlight.min = get_backlight_min_vbt(connector);
1125
1006 val = pch_get_backlight(connector); 1126 val = pch_get_backlight(connector);
1007 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1127 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1008 1128
@@ -1035,6 +1155,8 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
1035 if (!panel->backlight.max) 1155 if (!panel->backlight.max)
1036 return -ENODEV; 1156 return -ENODEV;
1037 1157
1158 panel->backlight.min = get_backlight_min_vbt(connector);
1159
1038 val = i9xx_get_backlight(connector); 1160 val = i9xx_get_backlight(connector);
1039 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1161 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1040 1162
@@ -1062,6 +1184,8 @@ static int i965_setup_backlight(struct intel_connector *connector)
1062 if (!panel->backlight.max) 1184 if (!panel->backlight.max)
1063 return -ENODEV; 1185 return -ENODEV;
1064 1186
1187 panel->backlight.min = get_backlight_min_vbt(connector);
1188
1065 val = i9xx_get_backlight(connector); 1189 val = i9xx_get_backlight(connector);
1066 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1190 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1067 1191
@@ -1099,6 +1223,8 @@ static int vlv_setup_backlight(struct intel_connector *connector)
1099 if (!panel->backlight.max) 1223 if (!panel->backlight.max)
1100 return -ENODEV; 1224 return -ENODEV;
1101 1225
1226 panel->backlight.min = get_backlight_min_vbt(connector);
1227
1102 val = _vlv_get_backlight(dev, PIPE_A); 1228 val = _vlv_get_backlight(dev, PIPE_A);
1103 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1229 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1104 1230
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f1233f544f3e..c3bb925b2e65 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
93 struct drm_device *dev = crtc->dev; 93 struct drm_device *dev = crtc->dev;
94 struct drm_i915_private *dev_priv = dev->dev_private; 94 struct drm_i915_private *dev_priv = dev->dev_private;
95 struct drm_framebuffer *fb = crtc->primary->fb; 95 struct drm_framebuffer *fb = crtc->primary->fb;
96 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 96 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
97 struct drm_i915_gem_object *obj = intel_fb->obj;
98 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 97 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
99 int cfb_pitch; 98 int cfb_pitch;
100 int i; 99 int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
150 struct drm_device *dev = crtc->dev; 149 struct drm_device *dev = crtc->dev;
151 struct drm_i915_private *dev_priv = dev->dev_private; 150 struct drm_i915_private *dev_priv = dev->dev_private;
152 struct drm_framebuffer *fb = crtc->primary->fb; 151 struct drm_framebuffer *fb = crtc->primary->fb;
153 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 152 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
154 struct drm_i915_gem_object *obj = intel_fb->obj;
155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 153 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
156 u32 dpfc_ctl; 154 u32 dpfc_ctl;
157 155
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
222 struct drm_device *dev = crtc->dev; 220 struct drm_device *dev = crtc->dev;
223 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
224 struct drm_framebuffer *fb = crtc->primary->fb; 222 struct drm_framebuffer *fb = crtc->primary->fb;
225 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 223 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
226 struct drm_i915_gem_object *obj = intel_fb->obj;
227 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 224 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
228 u32 dpfc_ctl; 225 u32 dpfc_ctl;
229 226
230 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); 227 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
231 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 228 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
229 dev_priv->fbc.threshold++;
230
231 switch (dev_priv->fbc.threshold) {
232 case 4:
233 case 3:
234 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
235 break;
236 case 2:
232 dpfc_ctl |= DPFC_CTL_LIMIT_2X; 237 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
233 else 238 break;
239 case 1:
234 dpfc_ctl |= DPFC_CTL_LIMIT_1X; 240 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
241 break;
242 }
235 dpfc_ctl |= DPFC_CTL_FENCE_EN; 243 dpfc_ctl |= DPFC_CTL_FENCE_EN;
236 if (IS_GEN5(dev)) 244 if (IS_GEN5(dev))
237 dpfc_ctl |= obj->fence_reg; 245 dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
278 struct drm_device *dev = crtc->dev; 286 struct drm_device *dev = crtc->dev;
279 struct drm_i915_private *dev_priv = dev->dev_private; 287 struct drm_i915_private *dev_priv = dev->dev_private;
280 struct drm_framebuffer *fb = crtc->primary->fb; 288 struct drm_framebuffer *fb = crtc->primary->fb;
281 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 289 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
282 struct drm_i915_gem_object *obj = intel_fb->obj;
283 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 290 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
284 u32 dpfc_ctl; 291 u32 dpfc_ctl;
285 292
286 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); 293 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
287 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 294 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
295 dev_priv->fbc.threshold++;
296
297 switch (dev_priv->fbc.threshold) {
298 case 4:
299 case 3:
300 dpfc_ctl |= DPFC_CTL_LIMIT_4X;
301 break;
302 case 2:
288 dpfc_ctl |= DPFC_CTL_LIMIT_2X; 303 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
289 else 304 break;
305 case 1:
290 dpfc_ctl |= DPFC_CTL_LIMIT_1X; 306 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
307 break;
308 }
309
291 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 310 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
292 311
293 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 312 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
462 struct drm_crtc *crtc = NULL, *tmp_crtc; 481 struct drm_crtc *crtc = NULL, *tmp_crtc;
463 struct intel_crtc *intel_crtc; 482 struct intel_crtc *intel_crtc;
464 struct drm_framebuffer *fb; 483 struct drm_framebuffer *fb;
465 struct intel_framebuffer *intel_fb;
466 struct drm_i915_gem_object *obj; 484 struct drm_i915_gem_object *obj;
467 const struct drm_display_mode *adjusted_mode; 485 const struct drm_display_mode *adjusted_mode;
468 unsigned int max_width, max_height; 486 unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
507 525
508 intel_crtc = to_intel_crtc(crtc); 526 intel_crtc = to_intel_crtc(crtc);
509 fb = crtc->primary->fb; 527 fb = crtc->primary->fb;
510 intel_fb = to_intel_framebuffer(fb); 528 obj = intel_fb_obj(fb);
511 obj = intel_fb->obj;
512 adjusted_mode = &intel_crtc->config.adjusted_mode; 529 adjusted_mode = &intel_crtc->config.adjusted_mode;
513 530
514 if (i915.enable_fbc < 0) { 531 if (i915.enable_fbc < 0) {
@@ -529,7 +546,10 @@ void intel_update_fbc(struct drm_device *dev)
529 goto out_disable; 546 goto out_disable;
530 } 547 }
531 548
532 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 549 if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
550 max_width = 4096;
551 max_height = 4096;
552 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
533 max_width = 4096; 553 max_width = 4096;
534 max_height = 2048; 554 max_height = 2048;
535 } else { 555 } else {
@@ -563,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
563 if (in_dbg_master()) 583 if (in_dbg_master())
564 goto out_disable; 584 goto out_disable;
565 585
566 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { 586 if (i915_gem_stolen_setup_compression(dev, obj->base.size,
587 drm_format_plane_cpp(fb->pixel_format, 0))) {
567 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL)) 588 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
568 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); 589 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
569 goto out_disable; 590 goto out_disable;
@@ -789,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
789 return NULL; 810 return NULL;
790} 811}
791 812
792static void pineview_disable_cxsr(struct drm_device *dev) 813void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
793{ 814{
794 struct drm_i915_private *dev_priv = dev->dev_private; 815 struct drm_device *dev = dev_priv->dev;
816 u32 val;
817
818 if (IS_VALLEYVIEW(dev)) {
819 I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
820 } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
821 I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
822 } else if (IS_PINEVIEW(dev)) {
823 val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
824 val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
825 I915_WRITE(DSPFW3, val);
826 } else if (IS_I945G(dev) || IS_I945GM(dev)) {
827 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
828 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
829 I915_WRITE(FW_BLC_SELF, val);
830 } else if (IS_I915GM(dev)) {
831 val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
832 _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
833 I915_WRITE(INSTPM, val);
834 } else {
835 return;
836 }
795 837
796 /* deactivate cxsr */ 838 DRM_DEBUG_KMS("memory self-refresh is %s\n",
797 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); 839 enable ? "enabled" : "disabled");
798} 840}
799 841
800/* 842/*
@@ -864,95 +906,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
864 906
865/* Pineview has different values for various configs */ 907/* Pineview has different values for various configs */
866static const struct intel_watermark_params pineview_display_wm = { 908static const struct intel_watermark_params pineview_display_wm = {
867 PINEVIEW_DISPLAY_FIFO, 909 .fifo_size = PINEVIEW_DISPLAY_FIFO,
868 PINEVIEW_MAX_WM, 910 .max_wm = PINEVIEW_MAX_WM,
869 PINEVIEW_DFT_WM, 911 .default_wm = PINEVIEW_DFT_WM,
870 PINEVIEW_GUARD_WM, 912 .guard_size = PINEVIEW_GUARD_WM,
871 PINEVIEW_FIFO_LINE_SIZE 913 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
872}; 914};
873static const struct intel_watermark_params pineview_display_hplloff_wm = { 915static const struct intel_watermark_params pineview_display_hplloff_wm = {
874 PINEVIEW_DISPLAY_FIFO, 916 .fifo_size = PINEVIEW_DISPLAY_FIFO,
875 PINEVIEW_MAX_WM, 917 .max_wm = PINEVIEW_MAX_WM,
876 PINEVIEW_DFT_HPLLOFF_WM, 918 .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
877 PINEVIEW_GUARD_WM, 919 .guard_size = PINEVIEW_GUARD_WM,
878 PINEVIEW_FIFO_LINE_SIZE 920 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
879}; 921};
880static const struct intel_watermark_params pineview_cursor_wm = { 922static const struct intel_watermark_params pineview_cursor_wm = {
881 PINEVIEW_CURSOR_FIFO, 923 .fifo_size = PINEVIEW_CURSOR_FIFO,
882 PINEVIEW_CURSOR_MAX_WM, 924 .max_wm = PINEVIEW_CURSOR_MAX_WM,
883 PINEVIEW_CURSOR_DFT_WM, 925 .default_wm = PINEVIEW_CURSOR_DFT_WM,
884 PINEVIEW_CURSOR_GUARD_WM, 926 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
885 PINEVIEW_FIFO_LINE_SIZE, 927 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
886}; 928};
887static const struct intel_watermark_params pineview_cursor_hplloff_wm = { 929static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
888 PINEVIEW_CURSOR_FIFO, 930 .fifo_size = PINEVIEW_CURSOR_FIFO,
889 PINEVIEW_CURSOR_MAX_WM, 931 .max_wm = PINEVIEW_CURSOR_MAX_WM,
890 PINEVIEW_CURSOR_DFT_WM, 932 .default_wm = PINEVIEW_CURSOR_DFT_WM,
891 PINEVIEW_CURSOR_GUARD_WM, 933 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
892 PINEVIEW_FIFO_LINE_SIZE 934 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
893}; 935};
894static const struct intel_watermark_params g4x_wm_info = { 936static const struct intel_watermark_params g4x_wm_info = {
895 G4X_FIFO_SIZE, 937 .fifo_size = G4X_FIFO_SIZE,
896 G4X_MAX_WM, 938 .max_wm = G4X_MAX_WM,
897 G4X_MAX_WM, 939 .default_wm = G4X_MAX_WM,
898 2, 940 .guard_size = 2,
899 G4X_FIFO_LINE_SIZE, 941 .cacheline_size = G4X_FIFO_LINE_SIZE,
900}; 942};
901static const struct intel_watermark_params g4x_cursor_wm_info = { 943static const struct intel_watermark_params g4x_cursor_wm_info = {
902 I965_CURSOR_FIFO, 944 .fifo_size = I965_CURSOR_FIFO,
903 I965_CURSOR_MAX_WM, 945 .max_wm = I965_CURSOR_MAX_WM,
904 I965_CURSOR_DFT_WM, 946 .default_wm = I965_CURSOR_DFT_WM,
905 2, 947 .guard_size = 2,
906 G4X_FIFO_LINE_SIZE, 948 .cacheline_size = G4X_FIFO_LINE_SIZE,
907}; 949};
908static const struct intel_watermark_params valleyview_wm_info = { 950static const struct intel_watermark_params valleyview_wm_info = {
909 VALLEYVIEW_FIFO_SIZE, 951 .fifo_size = VALLEYVIEW_FIFO_SIZE,
910 VALLEYVIEW_MAX_WM, 952 .max_wm = VALLEYVIEW_MAX_WM,
911 VALLEYVIEW_MAX_WM, 953 .default_wm = VALLEYVIEW_MAX_WM,
912 2, 954 .guard_size = 2,
913 G4X_FIFO_LINE_SIZE, 955 .cacheline_size = G4X_FIFO_LINE_SIZE,
914}; 956};
915static const struct intel_watermark_params valleyview_cursor_wm_info = { 957static const struct intel_watermark_params valleyview_cursor_wm_info = {
916 I965_CURSOR_FIFO, 958 .fifo_size = I965_CURSOR_FIFO,
917 VALLEYVIEW_CURSOR_MAX_WM, 959 .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
918 I965_CURSOR_DFT_WM, 960 .default_wm = I965_CURSOR_DFT_WM,
919 2, 961 .guard_size = 2,
920 G4X_FIFO_LINE_SIZE, 962 .cacheline_size = G4X_FIFO_LINE_SIZE,
921}; 963};
922static const struct intel_watermark_params i965_cursor_wm_info = { 964static const struct intel_watermark_params i965_cursor_wm_info = {
923 I965_CURSOR_FIFO, 965 .fifo_size = I965_CURSOR_FIFO,
924 I965_CURSOR_MAX_WM, 966 .max_wm = I965_CURSOR_MAX_WM,
925 I965_CURSOR_DFT_WM, 967 .default_wm = I965_CURSOR_DFT_WM,
926 2, 968 .guard_size = 2,
927 I915_FIFO_LINE_SIZE, 969 .cacheline_size = I915_FIFO_LINE_SIZE,
928}; 970};
929static const struct intel_watermark_params i945_wm_info = { 971static const struct intel_watermark_params i945_wm_info = {
930 I945_FIFO_SIZE, 972 .fifo_size = I945_FIFO_SIZE,
931 I915_MAX_WM, 973 .max_wm = I915_MAX_WM,
932 1, 974 .default_wm = 1,
933 2, 975 .guard_size = 2,
934 I915_FIFO_LINE_SIZE 976 .cacheline_size = I915_FIFO_LINE_SIZE,
935}; 977};
936static const struct intel_watermark_params i915_wm_info = { 978static const struct intel_watermark_params i915_wm_info = {
937 I915_FIFO_SIZE, 979 .fifo_size = I915_FIFO_SIZE,
938 I915_MAX_WM, 980 .max_wm = I915_MAX_WM,
939 1, 981 .default_wm = 1,
940 2, 982 .guard_size = 2,
941 I915_FIFO_LINE_SIZE 983 .cacheline_size = I915_FIFO_LINE_SIZE,
942}; 984};
943static const struct intel_watermark_params i830_wm_info = { 985static const struct intel_watermark_params i830_wm_info = {
944 I855GM_FIFO_SIZE, 986 .fifo_size = I855GM_FIFO_SIZE,
945 I915_MAX_WM, 987 .max_wm = I915_MAX_WM,
946 1, 988 .default_wm = 1,
947 2, 989 .guard_size = 2,
948 I830_FIFO_LINE_SIZE 990 .cacheline_size = I830_FIFO_LINE_SIZE,
949}; 991};
950static const struct intel_watermark_params i845_wm_info = { 992static const struct intel_watermark_params i845_wm_info = {
951 I830_FIFO_SIZE, 993 .fifo_size = I830_FIFO_SIZE,
952 I915_MAX_WM, 994 .max_wm = I915_MAX_WM,
953 1, 995 .default_wm = 1,
954 2, 996 .guard_size = 2,
955 I830_FIFO_LINE_SIZE 997 .cacheline_size = I830_FIFO_LINE_SIZE,
956}; 998};
957 999
958/** 1000/**
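The table rewrite above swaps positional aggregate initializers for designated ones, which keeps each constant bound to its field by name if the struct is ever reordered or grown. A minimal sketch of the difference, with the field list taken from the initializers above and the types and values approximated:

struct example_watermark_params {	/* stand-in for intel_watermark_params */
	unsigned long fifo_size;
	unsigned long max_wm;
	unsigned long default_wm;
	unsigned long guard_size;
	unsigned long cacheline_size;
};

/* Positional form: silently mispairs values if a field is inserted. */
static const struct example_watermark_params positional = { 96, 32, 1, 2, 64 };

/* Designated form: each value stays attached to its field by name. */
static const struct example_watermark_params designated = {
	.fifo_size      = 96,	/* values here are invented */
	.max_wm         = 32,
	.default_wm     = 1,
	.guard_size     = 2,
	.cacheline_size = 64,
};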
@@ -1033,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
1033 dev_priv->fsb_freq, dev_priv->mem_freq); 1075 dev_priv->fsb_freq, dev_priv->mem_freq);
1034 if (!latency) { 1076 if (!latency) {
1035 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 1077 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1036 pineview_disable_cxsr(dev); 1078 intel_set_memory_cxsr(dev_priv, false);
1037 return; 1079 return;
1038 } 1080 }
1039 1081
@@ -1084,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
1084 I915_WRITE(DSPFW3, reg); 1126 I915_WRITE(DSPFW3, reg);
1085 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); 1127 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1086 1128
1087 /* activate cxsr */ 1129 intel_set_memory_cxsr(dev_priv, true);
1088 I915_WRITE(DSPFW3,
1089 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1090 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1091 } else { 1130 } else {
1092 pineview_disable_cxsr(dev); 1131 intel_set_memory_cxsr(dev_priv, false);
1093 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1094 } 1132 }
1095} 1133}
1096 1134
@@ -1316,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1316 int plane_sr, cursor_sr; 1354 int plane_sr, cursor_sr;
1317 int ignore_plane_sr, ignore_cursor_sr; 1355 int ignore_plane_sr, ignore_cursor_sr;
1318 unsigned int enabled = 0; 1356 unsigned int enabled = 0;
1357 bool cxsr_enabled;
1319 1358
1320 vlv_update_drain_latency(dev); 1359 vlv_update_drain_latency(dev);
1321 1360
@@ -1342,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1342 &valleyview_wm_info, 1381 &valleyview_wm_info,
1343 &valleyview_cursor_wm_info, 1382 &valleyview_cursor_wm_info,
1344 &ignore_plane_sr, &cursor_sr)) { 1383 &ignore_plane_sr, &cursor_sr)) {
1345 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); 1384 cxsr_enabled = true;
1346 } else { 1385 } else {
1347 I915_WRITE(FW_BLC_SELF_VLV, 1386 cxsr_enabled = false;
1348 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); 1387 intel_set_memory_cxsr(dev_priv, false);
1349 plane_sr = cursor_sr = 0; 1388 plane_sr = cursor_sr = 0;
1350 } 1389 }
1351 1390
@@ -1365,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1365 I915_WRITE(DSPFW3, 1404 I915_WRITE(DSPFW3,
1366 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | 1405 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1367 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1406 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1407
1408 if (cxsr_enabled)
1409 intel_set_memory_cxsr(dev_priv, true);
1368} 1410}
1369 1411
1370static void g4x_update_wm(struct drm_crtc *crtc) 1412static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1375,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1375 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1417 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1376 int plane_sr, cursor_sr; 1418 int plane_sr, cursor_sr;
1377 unsigned int enabled = 0; 1419 unsigned int enabled = 0;
1420 bool cxsr_enabled;
1378 1421
1379 if (g4x_compute_wm0(dev, PIPE_A, 1422 if (g4x_compute_wm0(dev, PIPE_A,
1380 &g4x_wm_info, latency_ns, 1423 &g4x_wm_info, latency_ns,
@@ -1394,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1394 &g4x_wm_info, 1437 &g4x_wm_info,
1395 &g4x_cursor_wm_info, 1438 &g4x_cursor_wm_info,
1396 &plane_sr, &cursor_sr)) { 1439 &plane_sr, &cursor_sr)) {
1397 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 1440 cxsr_enabled = true;
1398 } else { 1441 } else {
1399 I915_WRITE(FW_BLC_SELF, 1442 cxsr_enabled = false;
1400 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); 1443 intel_set_memory_cxsr(dev_priv, false);
1401 plane_sr = cursor_sr = 0; 1444 plane_sr = cursor_sr = 0;
1402 } 1445 }
1403 1446
@@ -1418,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1418 I915_WRITE(DSPFW3, 1461 I915_WRITE(DSPFW3,
1419 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | 1462 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1420 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1463 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1464
1465 if (cxsr_enabled)
1466 intel_set_memory_cxsr(dev_priv, true);
1421} 1467}
1422 1468
1423static void i965_update_wm(struct drm_crtc *unused_crtc) 1469static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1427,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1427 struct drm_crtc *crtc; 1473 struct drm_crtc *crtc;
1428 int srwm = 1; 1474 int srwm = 1;
1429 int cursor_sr = 16; 1475 int cursor_sr = 16;
1476 bool cxsr_enabled;
1430 1477
1431 /* Calc sr entries for one plane configs */ 1478 /* Calc sr entries for one plane configs */
1432 crtc = single_enabled_crtc(dev); 1479 crtc = single_enabled_crtc(dev);
@@ -1468,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1468 DRM_DEBUG_KMS("self-refresh watermark: display plane %d " 1515 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1469 "cursor %d\n", srwm, cursor_sr); 1516 "cursor %d\n", srwm, cursor_sr);
1470 1517
1471 if (IS_CRESTLINE(dev)) 1518 cxsr_enabled = true;
1472 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1473 } else { 1519 } else {
1520 cxsr_enabled = false;
1474 /* Turn off self refresh if both pipes are enabled */ 1521 /* Turn off self refresh if both pipes are enabled */
1475 if (IS_CRESTLINE(dev)) 1522 intel_set_memory_cxsr(dev_priv, false);
1476 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1477 & ~FW_BLC_SELF_EN);
1478 } 1523 }
1479 1524
1480 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 1525 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1486,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1486 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 1531 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1487 /* update cursor SR watermark */ 1532 /* update cursor SR watermark */
1488 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1533 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1534
1535 if (cxsr_enabled)
1536 intel_set_memory_cxsr(dev_priv, true);
1489} 1537}
1490 1538
1491static void i9xx_update_wm(struct drm_crtc *unused_crtc) 1539static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1545,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1545 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 1593 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1546 1594
1547 if (IS_I915GM(dev) && enabled) { 1595 if (IS_I915GM(dev) && enabled) {
1548 struct intel_framebuffer *fb; 1596 struct drm_i915_gem_object *obj;
1549 1597
1550 fb = to_intel_framebuffer(enabled->primary->fb); 1598 obj = intel_fb_obj(enabled->primary->fb);
1551 1599
1552 /* self-refresh seems busted with untiled */ 1600 /* self-refresh seems busted with untiled */
1553 if (fb->obj->tiling_mode == I915_TILING_NONE) 1601 if (obj->tiling_mode == I915_TILING_NONE)
1554 enabled = NULL; 1602 enabled = NULL;
1555 } 1603 }
1556 1604
@@ -1560,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1560 cwm = 2; 1608 cwm = 2;
1561 1609
1562 /* Play safe and disable self-refresh before adjusting watermarks. */ 1610 /* Play safe and disable self-refresh before adjusting watermarks. */
1563 if (IS_I945G(dev) || IS_I945GM(dev)) 1611 intel_set_memory_cxsr(dev_priv, false);
1564 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1565 else if (IS_I915GM(dev))
1566 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
1567 1612
1568 /* Calc sr entries for one plane configs */ 1613 /* Calc sr entries for one plane configs */
1569 if (HAS_FW_BLC(dev) && enabled) { 1614 if (HAS_FW_BLC(dev) && enabled) {
@@ -1609,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1609 I915_WRITE(FW_BLC, fwater_lo); 1654 I915_WRITE(FW_BLC, fwater_lo);
1610 I915_WRITE(FW_BLC2, fwater_hi); 1655 I915_WRITE(FW_BLC2, fwater_hi);
1611 1656
1612 if (HAS_FW_BLC(dev)) { 1657 if (enabled)
1613 if (enabled) { 1658 intel_set_memory_cxsr(dev_priv, true);
1614 if (IS_I945G(dev) || IS_I945GM(dev))
1615 I915_WRITE(FW_BLC_SELF,
1616 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1617 else if (IS_I915GM(dev))
1618 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
1619 DRM_DEBUG_KMS("memory self refresh enabled\n");
1620 } else
1621 DRM_DEBUG_KMS("memory self refresh disabled\n");
1622 }
1623} 1659}
1624 1660
1625static void i845_update_wm(struct drm_crtc *unused_crtc) 1661static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -2707,10 +2743,11 @@ static void ilk_update_wm(struct drm_crtc *crtc)
2707 ilk_write_wm_values(dev_priv, &results); 2743 ilk_write_wm_values(dev_priv, &results);
2708} 2744}
2709 2745
2710static void ilk_update_sprite_wm(struct drm_plane *plane, 2746static void
2711 struct drm_crtc *crtc, 2747ilk_update_sprite_wm(struct drm_plane *plane,
2712 uint32_t sprite_width, int pixel_size, 2748 struct drm_crtc *crtc,
2713 bool enabled, bool scaled) 2749 uint32_t sprite_width, uint32_t sprite_height,
2750 int pixel_size, bool enabled, bool scaled)
2714{ 2751{
2715 struct drm_device *dev = plane->dev; 2752 struct drm_device *dev = plane->dev;
2716 struct intel_plane *intel_plane = to_intel_plane(plane); 2753 struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2718,6 +2755,7 @@ static void ilk_update_sprite_wm(struct drm_plane *plane,
2718 intel_plane->wm.enabled = enabled; 2755 intel_plane->wm.enabled = enabled;
2719 intel_plane->wm.scaled = scaled; 2756 intel_plane->wm.scaled = scaled;
2720 intel_plane->wm.horiz_pixels = sprite_width; 2757 intel_plane->wm.horiz_pixels = sprite_width;
2758 intel_plane->wm.vert_pixels = sprite_width;
2721 intel_plane->wm.bytes_per_pixel = pixel_size; 2759 intel_plane->wm.bytes_per_pixel = pixel_size;
2722 2760
2723 /* 2761 /*
@@ -2852,13 +2890,16 @@ void intel_update_watermarks(struct drm_crtc *crtc)
2852 2890
2853void intel_update_sprite_watermarks(struct drm_plane *plane, 2891void intel_update_sprite_watermarks(struct drm_plane *plane,
2854 struct drm_crtc *crtc, 2892 struct drm_crtc *crtc,
2855 uint32_t sprite_width, int pixel_size, 2893 uint32_t sprite_width,
2894 uint32_t sprite_height,
2895 int pixel_size,
2856 bool enabled, bool scaled) 2896 bool enabled, bool scaled)
2857{ 2897{
2858 struct drm_i915_private *dev_priv = plane->dev->dev_private; 2898 struct drm_i915_private *dev_priv = plane->dev->dev_private;
2859 2899
2860 if (dev_priv->display.update_sprite_wm) 2900 if (dev_priv->display.update_sprite_wm)
2861 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width, 2901 dev_priv->display.update_sprite_wm(plane, crtc,
2902 sprite_width, sprite_height,
2862 pixel_size, enabled, scaled); 2903 pixel_size, enabled, scaled);
2863} 2904}
2864 2905
@@ -3147,6 +3188,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3147 if (val < dev_priv->rps.max_freq_softlimit) 3188 if (val < dev_priv->rps.max_freq_softlimit)
3148 mask |= GEN6_PM_RP_UP_THRESHOLD; 3189 mask |= GEN6_PM_RP_UP_THRESHOLD;
3149 3190
3191 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
3192 mask &= dev_priv->pm_rps_events;
3193
3150 /* IVB and SNB hard hangs on looping batchbuffer 3194 /* IVB and SNB hard hangs on looping batchbuffer
3151 * if GEN6_PM_UP_EI_EXPIRED is masked. 3195 * if GEN6_PM_UP_EI_EXPIRED is masked.
3152 */ 3196 */
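The two added lines first fold the EI-expired events into the unmask set and then clamp the whole mask to what the platform actually supports. A worked example with an invented event combination:

/* Suppose (invented for illustration):
 *   pm_rps_events = RP_UP_THRESHOLD | RP_DOWN_THRESHOLD | RP_DOWN_EI_EXPIRED
 *   mask          = RP_UP_THRESHOLD          (from the checks above)
 *
 * mask |= pm_rps_events & (RP_DOWN_EI_EXPIRED | RP_UP_EI_EXPIRED);
 *   -> mask = RP_UP_THRESHOLD | RP_DOWN_EI_EXPIRED  (UP_EI unsupported,
 *      so it is filtered out by the AND with pm_rps_events)
 * mask &= pm_rps_events;
 *   -> unchanged here; this final AND guarantees nothing outside the
 *      platform's event set is ever unmasked.
 */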
@@ -3250,7 +3294,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
3250 3294
3251 mutex_lock(&dev_priv->rps.hw_lock); 3295 mutex_lock(&dev_priv->rps.hw_lock);
3252 if (dev_priv->rps.enabled) { 3296 if (dev_priv->rps.enabled) {
3253 if (IS_VALLEYVIEW(dev)) 3297 if (IS_CHERRYVIEW(dev))
3298 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3299 else if (IS_VALLEYVIEW(dev))
3254 vlv_set_rps_idle(dev_priv); 3300 vlv_set_rps_idle(dev_priv);
3255 else 3301 else
3256 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 3302 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3348,6 +3394,15 @@ static void gen6_disable_rps(struct drm_device *dev)
3348 gen6_disable_rps_interrupts(dev); 3394 gen6_disable_rps_interrupts(dev);
3349} 3395}
3350 3396
3397static void cherryview_disable_rps(struct drm_device *dev)
3398{
3399 struct drm_i915_private *dev_priv = dev->dev_private;
3400
3401 I915_WRITE(GEN6_RC_CONTROL, 0);
3402
3403 gen8_disable_rps_interrupts(dev);
3404}
3405
3351static void valleyview_disable_rps(struct drm_device *dev) 3406static void valleyview_disable_rps(struct drm_device *dev)
3352{ 3407{
3353 struct drm_i915_private *dev_priv = dev->dev_private; 3408 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3419,7 +3474,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev)
3419 3474
3420 spin_lock_irq(&dev_priv->irq_lock); 3475 spin_lock_irq(&dev_priv->irq_lock);
3421 WARN_ON(dev_priv->rps.pm_iir); 3476 WARN_ON(dev_priv->rps.pm_iir);
3422 bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 3477 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3423 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events); 3478 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3424 spin_unlock_irq(&dev_priv->irq_lock); 3479 spin_unlock_irq(&dev_priv->irq_lock);
3425} 3480}
@@ -3430,7 +3485,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
3430 3485
3431 spin_lock_irq(&dev_priv->irq_lock); 3486 spin_lock_irq(&dev_priv->irq_lock);
3432 WARN_ON(dev_priv->rps.pm_iir); 3487 WARN_ON(dev_priv->rps.pm_iir);
3433 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 3488 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3434 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); 3489 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3435 spin_unlock_irq(&dev_priv->irq_lock); 3490 spin_unlock_irq(&dev_priv->irq_lock);
3436} 3491}
@@ -3483,15 +3538,23 @@ static void gen8_enable_rps(struct drm_device *dev)
3483 for_each_ring(ring, dev_priv, unused) 3538 for_each_ring(ring, dev_priv, unused)
3484 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 3539 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3485 I915_WRITE(GEN6_RC_SLEEP, 0); 3540 I915_WRITE(GEN6_RC_SLEEP, 0);
3486 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 3541 if (IS_BROADWELL(dev))
3542 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
3543 else
3544 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3487 3545
3488 /* 3: Enable RC6 */ 3546 /* 3: Enable RC6 */
3489 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 3547 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3490 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 3548 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3491 intel_print_rc6_info(dev, rc6_mask); 3549 intel_print_rc6_info(dev, rc6_mask);
3492 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 3550 if (IS_BROADWELL(dev))
3493 GEN6_RC_CTL_EI_MODE(1) | 3551 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3494 rc6_mask); 3552 GEN7_RC_CTL_TO_MODE |
3553 rc6_mask);
3554 else
3555 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3556 GEN6_RC_CTL_EI_MODE(1) |
3557 rc6_mask);
3495 3558
3496 /* 4 Program defaults and thresholds for RPS*/ 3559 /* 4 Program defaults and thresholds for RPS*/
3497 I915_WRITE(GEN6_RPNSWREQ, 3560 I915_WRITE(GEN6_RPNSWREQ,
@@ -3727,7 +3790,57 @@ void gen6_update_ring_freq(struct drm_device *dev)
3727 mutex_unlock(&dev_priv->rps.hw_lock); 3790 mutex_unlock(&dev_priv->rps.hw_lock);
3728} 3791}
3729 3792
3730int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) 3793static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
3794{
3795 u32 val, rp0;
3796
3797 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3798 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3799
3800 return rp0;
3801}
3802
3803static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3804{
3805 u32 val, rpe;
3806
3807 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
3808 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
3809
3810 return rpe;
3811}
3812
3813static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
3814{
3815 u32 val, rp1;
3816
3817 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3818 rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3819
3820 return rp1;
3821}
3822
3823static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
3824{
3825 u32 val, rpn;
3826
3827 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3828 rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
3829 return rpn;
3830}
3831
3832static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
3833{
3834 u32 val, rp1;
3835
3836 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3837
3838 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
3839
3840 return rp1;
3841}
3842
3843static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3731{ 3844{
3732 u32 val, rp0; 3845 u32 val, rp0;
3733 3846
@@ -3752,7 +3865,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3752 return rpe; 3865 return rpe;
3753} 3866}
3754 3867
3755int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) 3868static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3756{ 3869{
3757 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 3870 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3758} 3871}
@@ -3766,6 +3879,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3766 dev_priv->vlv_pctx->stolen->start); 3879 dev_priv->vlv_pctx->stolen->start);
3767} 3880}
3768 3881
3882
3883/* Check that the pcbr address is not empty. */
3884static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
3885{
3886 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3887
3888 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
3889}
3890
3891static void cherryview_setup_pctx(struct drm_device *dev)
3892{
3893 struct drm_i915_private *dev_priv = dev->dev_private;
3894 unsigned long pctx_paddr, paddr;
3895 struct i915_gtt *gtt = &dev_priv->gtt;
3896 u32 pcbr;
3897 int pctx_size = 32*1024;
3898
3899 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3900
3901 pcbr = I915_READ(VLV_PCBR);
3902 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
3903 paddr = (dev_priv->mm.stolen_base +
3904 (gtt->stolen_size - pctx_size));
3905
3906 pctx_paddr = (paddr & (~4095));
3907 I915_WRITE(VLV_PCBR, pctx_paddr);
3908 }
3909}
3910
3769static void valleyview_setup_pctx(struct drm_device *dev) 3911static void valleyview_setup_pctx(struct drm_device *dev)
3770{ 3912{
3771 struct drm_i915_private *dev_priv = dev->dev_private; 3913 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3840,6 +3982,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
3840 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 3982 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3841 dev_priv->rps.efficient_freq); 3983 dev_priv->rps.efficient_freq);
3842 3984
3985 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
3986 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
3987 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
3988 dev_priv->rps.rp1_freq);
3989
3843 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 3990 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3844 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 3991 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3845 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), 3992 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -3855,11 +4002,142 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
3855 mutex_unlock(&dev_priv->rps.hw_lock); 4002 mutex_unlock(&dev_priv->rps.hw_lock);
3856} 4003}
3857 4004
4005static void cherryview_init_gt_powersave(struct drm_device *dev)
4006{
4007 struct drm_i915_private *dev_priv = dev->dev_private;
4008
4009 cherryview_setup_pctx(dev);
4010
4011 mutex_lock(&dev_priv->rps.hw_lock);
4012
4013 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4014 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4015 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4016 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4017 dev_priv->rps.max_freq);
4018
4019 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
4020 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4021 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4022 dev_priv->rps.efficient_freq);
4023
4024 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
4025 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
4026 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4027 dev_priv->rps.rp1_freq);
4028
4029 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
4030 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4031 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4032 dev_priv->rps.min_freq);
4033
4034 /* Preserve min/max settings in case of re-init */
4035 if (dev_priv->rps.max_freq_softlimit == 0)
4036 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4037
4038 if (dev_priv->rps.min_freq_softlimit == 0)
4039 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4040
4041 mutex_unlock(&dev_priv->rps.hw_lock);
4042}
4043
3858static void valleyview_cleanup_gt_powersave(struct drm_device *dev) 4044static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
3859{ 4045{
3860 valleyview_cleanup_pctx(dev); 4046 valleyview_cleanup_pctx(dev);
3861} 4047}
3862 4048
4049static void cherryview_enable_rps(struct drm_device *dev)
4050{
4051 struct drm_i915_private *dev_priv = dev->dev_private;
4052 struct intel_engine_cs *ring;
4053 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
4054 int i;
4055
4056 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4057
4058 gtfifodbg = I915_READ(GTFIFODBG);
4059 if (gtfifodbg) {
4060 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4061 gtfifodbg);
4062 I915_WRITE(GTFIFODBG, gtfifodbg);
4063 }
4064
4065 cherryview_check_pctx(dev_priv);
4066
4067 /* 1a & 1b: Get forcewake during program sequence. Although the driver
4068 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4069 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4070
4071 /* 2a: Program RC6 thresholds.*/
4072 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4073 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4074 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4075
4076 for_each_ring(ring, dev_priv, i)
4077 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4078 I915_WRITE(GEN6_RC_SLEEP, 0);
4079
4080 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4081
4082 /* allows RC6 residency counter to work */
4083 I915_WRITE(VLV_COUNTER_CONTROL,
4084 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
4085 VLV_MEDIA_RC6_COUNT_EN |
4086 VLV_RENDER_RC6_COUNT_EN));
4087
4088 /* For now we assume BIOS is allocating and populating the PCBR */
4089 pcbr = I915_READ(VLV_PCBR);
4090
4091 DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
4092
4093 /* 3: Enable RC6 */
4094 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
4095 (pcbr >> VLV_PCBR_ADDR_SHIFT))
4096 rc6_mode = GEN6_RC_CTL_EI_MODE(1);
4097
4098 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4099
4100 /* 4 Program defaults and thresholds for RPS*/
4101 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4102 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4103 I915_WRITE(GEN6_RP_UP_EI, 66000);
4104 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4105
4106 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4107
4108 /* WaDisablePwrmtrEvent:chv (pre-production hw) */
4109 I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
4110 I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
4111
4112 /* 5: Enable RPS */
4113 I915_WRITE(GEN6_RP_CONTROL,
4114 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4115 GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
4116 GEN6_RP_ENABLE |
4117 GEN6_RP_UP_BUSY_AVG |
4118 GEN6_RP_DOWN_IDLE_AVG);
4119
4120 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4121
4122 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4123 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4124
4125 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4126 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4127 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4128 dev_priv->rps.cur_freq);
4129
4130 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4131 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4132 dev_priv->rps.efficient_freq);
4133
4134 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4135
4136 gen8_enable_rps_interrupts(dev);
4137
4138 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4139}
4140
3863static void valleyview_enable_rps(struct drm_device *dev) 4141static void valleyview_enable_rps(struct drm_device *dev)
3864{ 4142{
3865 struct drm_i915_private *dev_priv = dev->dev_private; 4143 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3886,6 +4164,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
3886 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 4164 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3887 4165
3888 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 4166 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4167 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
3889 4168
3890 I915_WRITE(GEN6_RP_CONTROL, 4169 I915_WRITE(GEN6_RP_CONTROL,
3891 GEN6_RP_MEDIA_TURBO | 4170 GEN6_RP_MEDIA_TURBO |
@@ -3906,9 +4185,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
3906 4185
3907 /* allows RC6 residency counter to work */ 4186 /* allows RC6 residency counter to work */
3908 I915_WRITE(VLV_COUNTER_CONTROL, 4187 I915_WRITE(VLV_COUNTER_CONTROL,
3909 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 4188 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
4189 VLV_RENDER_RC0_COUNT_EN |
3910 VLV_MEDIA_RC6_COUNT_EN | 4190 VLV_MEDIA_RC6_COUNT_EN |
3911 VLV_RENDER_RC6_COUNT_EN)); 4191 VLV_RENDER_RC6_COUNT_EN));
4192
3912 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 4193 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3913 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 4194 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
3914 4195
@@ -4666,33 +4947,60 @@ void intel_init_gt_powersave(struct drm_device *dev)
4666{ 4947{
4667 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); 4948 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
4668 4949
4669 if (IS_VALLEYVIEW(dev)) 4950 if (IS_CHERRYVIEW(dev))
4951 cherryview_init_gt_powersave(dev);
4952 else if (IS_VALLEYVIEW(dev))
4670 valleyview_init_gt_powersave(dev); 4953 valleyview_init_gt_powersave(dev);
4671} 4954}
4672 4955
4673void intel_cleanup_gt_powersave(struct drm_device *dev) 4956void intel_cleanup_gt_powersave(struct drm_device *dev)
4674{ 4957{
4675 if (IS_VALLEYVIEW(dev)) 4958 if (IS_CHERRYVIEW(dev))
4959 return;
4960 else if (IS_VALLEYVIEW(dev))
4676 valleyview_cleanup_gt_powersave(dev); 4961 valleyview_cleanup_gt_powersave(dev);
4677} 4962}
4678 4963
4964/**
4965 * intel_suspend_gt_powersave - suspend PM work and helper threads
4966 * @dev: drm device
4967 *
4968 * We don't want to disable RC6 or other features here, we just want
4969 * to make sure any work we've queued has finished and won't bother
4970 * us while we're suspended.
4971 */
4972void intel_suspend_gt_powersave(struct drm_device *dev)
4973{
4974 struct drm_i915_private *dev_priv = dev->dev_private;
4975
4976 /* Interrupts should be disabled already to avoid re-arming. */
4977 WARN_ON(intel_irqs_enabled(dev_priv));
4978
4979 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4980
4981 cancel_work_sync(&dev_priv->rps.work);
4982
4983 /* Force GPU to min freq during suspend */
4984 gen6_rps_idle(dev_priv);
4985}
4986
4679void intel_disable_gt_powersave(struct drm_device *dev) 4987void intel_disable_gt_powersave(struct drm_device *dev)
4680{ 4988{
4681 struct drm_i915_private *dev_priv = dev->dev_private; 4989 struct drm_i915_private *dev_priv = dev->dev_private;
4682 4990
4683 /* Interrupts should be disabled already to avoid re-arming. */ 4991 /* Interrupts should be disabled already to avoid re-arming. */
4684 WARN_ON(dev->irq_enabled); 4992 WARN_ON(intel_irqs_enabled(dev_priv));
4685 4993
4686 if (IS_IRONLAKE_M(dev)) { 4994 if (IS_IRONLAKE_M(dev)) {
4687 ironlake_disable_drps(dev); 4995 ironlake_disable_drps(dev);
4688 ironlake_disable_rc6(dev); 4996 ironlake_disable_rc6(dev);
4689 } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) { 4997 } else if (INTEL_INFO(dev)->gen >= 6) {
4690 if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work)) 4998 intel_suspend_gt_powersave(dev);
4691 intel_runtime_pm_put(dev_priv);
4692 4999
4693 cancel_work_sync(&dev_priv->rps.work);
4694 mutex_lock(&dev_priv->rps.hw_lock); 5000 mutex_lock(&dev_priv->rps.hw_lock);
4695 if (IS_VALLEYVIEW(dev)) 5001 if (IS_CHERRYVIEW(dev))
5002 cherryview_disable_rps(dev);
5003 else if (IS_VALLEYVIEW(dev))
4696 valleyview_disable_rps(dev); 5004 valleyview_disable_rps(dev);
4697 else 5005 else
4698 gen6_disable_rps(dev); 5006 gen6_disable_rps(dev);
@@ -4710,7 +5018,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
4710 5018
4711 mutex_lock(&dev_priv->rps.hw_lock); 5019 mutex_lock(&dev_priv->rps.hw_lock);
4712 5020
4713 if (IS_VALLEYVIEW(dev)) { 5021 if (IS_CHERRYVIEW(dev)) {
5022 cherryview_enable_rps(dev);
5023 } else if (IS_VALLEYVIEW(dev)) {
4714 valleyview_enable_rps(dev); 5024 valleyview_enable_rps(dev);
4715 } else if (IS_BROADWELL(dev)) { 5025 } else if (IS_BROADWELL(dev)) {
4716 gen8_enable_rps(dev); 5026 gen8_enable_rps(dev);
@@ -4735,7 +5045,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
4735 ironlake_enable_rc6(dev); 5045 ironlake_enable_rc6(dev);
4736 intel_init_emon(dev); 5046 intel_init_emon(dev);
4737 mutex_unlock(&dev->struct_mutex); 5047 mutex_unlock(&dev->struct_mutex);
4738 } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) { 5048 } else if (INTEL_INFO(dev)->gen >= 6) {
4739 /* 5049 /*
4740 * PCU communication is slow and this doesn't need to be 5050 * PCU communication is slow and this doesn't need to be
4741 * done at any specific time, so do this out of our fast path 5051 * done at any specific time, so do this out of our fast path
@@ -5108,7 +5418,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
5108 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); 5418 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5109 5419
5110 I915_WRITE(_3D_CHICKEN3, 5420 I915_WRITE(_3D_CHICKEN3,
5111 _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)); 5421 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
5112 5422
5113 I915_WRITE(COMMON_SLICE_CHICKEN2, 5423 I915_WRITE(COMMON_SLICE_CHICKEN2,
5114 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE)); 5424 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5343,10 +5653,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
5343 } 5653 }
5344 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); 5654 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5345 5655
5346 dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
5347 DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
5348 dev_priv->vlv_cdclk_freq);
5349
5350 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 5656 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5351 5657
5352 /* WaDisableEarlyCull:vlv */ 5658 /* WaDisableEarlyCull:vlv */
@@ -5421,6 +5727,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
5421static void cherryview_init_clock_gating(struct drm_device *dev) 5727static void cherryview_init_clock_gating(struct drm_device *dev)
5422{ 5728{
5423 struct drm_i915_private *dev_priv = dev->dev_private; 5729 struct drm_i915_private *dev_priv = dev->dev_private;
5730 u32 val;
5731
5732 mutex_lock(&dev_priv->rps.hw_lock);
5733 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
5734 mutex_unlock(&dev_priv->rps.hw_lock);
5735 switch ((val >> 2) & 0x7) {
5736 case 0:
5737 case 1:
5738 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
5739 dev_priv->mem_freq = 1600;
5740 break;
5741 case 2:
5742 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
5743 dev_priv->mem_freq = 1600;
5744 break;
5745 case 3:
5746 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
5747 dev_priv->mem_freq = 2000;
5748 break;
5749 case 4:
5750 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
5751 dev_priv->mem_freq = 1600;
5752 break;
5753 case 5:
5754 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
5755 dev_priv->mem_freq = 1600;
5756 break;
5757 }
5758 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5424 5759
5425 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 5760 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5426 5761
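The switch added above decodes bits [4:2] of CCK_FUSE_REG into the CZ clock mode and the reported DDR speed; laid out as a table for reference (contents taken directly from the cases above):

/* (CCK_FUSE_REG >> 2) & 0x7 decode:
 *
 *   fuse   cz clock mode                   mem_freq
 *   0, 1   CHV_CZ_CLOCK_FREQ_MODE_200      1600 MHz
 *   2      CHV_CZ_CLOCK_FREQ_MODE_267      1600 MHz
 *   3      CHV_CZ_CLOCK_FREQ_MODE_333      2000 MHz
 *   4      CHV_CZ_CLOCK_FREQ_MODE_320      1600 MHz
 *   5      CHV_CZ_CLOCK_FREQ_MODE_400      1600 MHz
 *
 * Values 6 and 7 fall through with cz_freq/mem_freq left unset.
 */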
@@ -5661,7 +5996,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5661static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) 5996static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5662{ 5997{
5663 struct drm_device *dev = dev_priv->dev; 5998 struct drm_device *dev = dev_priv->dev;
5664 unsigned long irqflags;
5665 5999
5666 /* 6000 /*
5667 * After we re-enable the power well, if we touch VGA register 0x3d5 6001 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -5677,21 +6011,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5677 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); 6011 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
5678 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 6012 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
5679 6013
5680 if (IS_BROADWELL(dev)) { 6014 if (IS_BROADWELL(dev))
5681 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 6015 gen8_irq_power_well_post_enable(dev_priv);
5682 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5683 dev_priv->de_irq_mask[PIPE_B]);
5684 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5685 ~dev_priv->de_irq_mask[PIPE_B] |
5686 GEN8_PIPE_VBLANK);
5687 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5688 dev_priv->de_irq_mask[PIPE_C]);
5689 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5690 ~dev_priv->de_irq_mask[PIPE_C] |
5691 GEN8_PIPE_VBLANK);
5692 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5693 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5694 }
5695} 6016}
5696 6017
5697static void hsw_set_power_well(struct drm_i915_private *dev_priv, 6018static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -5762,34 +6083,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
5762 return true; 6083 return true;
5763} 6084}
5764 6085
5765void __vlv_set_power_well(struct drm_i915_private *dev_priv, 6086static void vlv_set_power_well(struct drm_i915_private *dev_priv,
5766 enum punit_power_well power_well_id, bool enable) 6087 struct i915_power_well *power_well, bool enable)
5767{ 6088{
5768 struct drm_device *dev = dev_priv->dev; 6089 enum punit_power_well power_well_id = power_well->data;
5769 u32 mask; 6090 u32 mask;
5770 u32 state; 6091 u32 state;
5771 u32 ctrl; 6092 u32 ctrl;
5772 enum pipe pipe;
5773
5774 if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
5775 if (enable) {
5776 /*
5777 * Enable the CRI clock source so we can get at the
5778 * display and the reference clock for VGA
5779 * hotplug / manual detection.
5780 */
5781 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
5782 DPLL_REFA_CLK_ENABLE_VLV |
5783 DPLL_INTEGRATED_CRI_CLK_VLV);
5784 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
5785 } else {
5786 for_each_pipe(pipe)
5787 assert_pll_disabled(dev_priv, pipe);
5788 /* Assert common reset */
5789 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
5790 ~DPIO_CMNRST);
5791 }
5792 }
5793 6093
5794 mask = PUNIT_PWRGT_MASK(power_well_id); 6094 mask = PUNIT_PWRGT_MASK(power_well_id);
5795 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : 6095 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5817,28 +6117,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
5817 6117
5818out: 6118out:
5819 mutex_unlock(&dev_priv->rps.hw_lock); 6119 mutex_unlock(&dev_priv->rps.hw_lock);
5820
5821 /*
5822 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
5823 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
5824 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
5825 * b. The other bits such as sfr settings / modesel may all
5826 * be set to 0.
5827 *
5828 * This should only be done on init and resume from S3 with
5829 * both PLLs disabled, or we risk losing DPIO and PLL
5830 * synchronization.
5831 */
5832 if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
5833 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
5834}
5835
5836static void vlv_set_power_well(struct drm_i915_private *dev_priv,
5837 struct i915_power_well *power_well, bool enable)
5838{
5839 enum punit_power_well power_well_id = power_well->data;
5840
5841 __vlv_set_power_well(dev_priv, power_well_id, enable);
5842} 6120}
5843 6121
5844static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, 6122static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -5930,6 +6208,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
5930 vlv_set_power_well(dev_priv, power_well, false); 6208 vlv_set_power_well(dev_priv, power_well, false);
5931} 6209}
5932 6210
6211static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6212 struct i915_power_well *power_well)
6213{
6214 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6215
6216 /*
6217 * Enable the CRI clock source so we can get at the
6218 * display and the reference clock for VGA
6219 * hotplug / manual detection.
6220 */
6221 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6222 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6223 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6224
6225 vlv_set_power_well(dev_priv, power_well, true);
6226
6227 /*
6228 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
6229 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
6230 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
6231 * b. The other bits such as sfr settings / modesel may all
6232 * be set to 0.
6233 *
6234 * This should only be done on init and resume from S3 with
6235 * both PLLs disabled, or we risk losing DPIO and PLL
6236 * synchronization.
6237 */
6238 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6239}
6240
6241static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6242 struct i915_power_well *power_well)
6243{
6244 struct drm_device *dev = dev_priv->dev;
6245 enum pipe pipe;
6246
6247 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6248
6249 for_each_pipe(pipe)
6250 assert_pll_disabled(dev_priv, pipe);
6251
6252 /* Assert common reset */
6253 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
6254
6255 vlv_set_power_well(dev_priv, power_well, false);
6256}
6257
5933static void check_power_well_state(struct drm_i915_private *dev_priv, 6258static void check_power_well_state(struct drm_i915_private *dev_priv,
5934 struct i915_power_well *power_well) 6259 struct i915_power_well *power_well)
5935{ 6260{
@@ -6079,6 +6404,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
6079 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 6404 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6080 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ 6405 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6081 BIT(POWER_DOMAIN_PORT_CRT) | \ 6406 BIT(POWER_DOMAIN_PORT_CRT) | \
6407 BIT(POWER_DOMAIN_PLLS) | \
6082 BIT(POWER_DOMAIN_INIT)) 6408 BIT(POWER_DOMAIN_INIT))
6083#define HSW_DISPLAY_POWER_DOMAINS ( \ 6409#define HSW_DISPLAY_POWER_DOMAINS ( \
6084 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ 6410 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -6178,6 +6504,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = {
6178 .is_enabled = vlv_power_well_enabled, 6504 .is_enabled = vlv_power_well_enabled,
6179}; 6505};
6180 6506
6507static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
6508 .sync_hw = vlv_power_well_sync_hw,
6509 .enable = vlv_dpio_cmn_power_well_enable,
6510 .disable = vlv_dpio_cmn_power_well_disable,
6511 .is_enabled = vlv_power_well_enabled,
6512};
6513
6181static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 6514static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
6182 .sync_hw = vlv_power_well_sync_hw, 6515 .sync_hw = vlv_power_well_sync_hw,
6183 .enable = vlv_power_well_enable, 6516 .enable = vlv_power_well_enable,
@@ -6238,10 +6571,25 @@ static struct i915_power_well vlv_power_wells[] = {
6238 .name = "dpio-common", 6571 .name = "dpio-common",
6239 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 6572 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
6240 .data = PUNIT_POWER_WELL_DPIO_CMN_BC, 6573 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6241 .ops = &vlv_dpio_power_well_ops, 6574 .ops = &vlv_dpio_cmn_power_well_ops,
6242 }, 6575 },
6243}; 6576};
6244 6577
6578static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
6579 enum punit_power_well power_well_id)
6580{
6581 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6582 struct i915_power_well *power_well;
6583 int i;
6584
6585 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6586 if (power_well->data == power_well_id)
6587 return power_well;
6588 }
6589
6590 return NULL;
6591}
6592
6245#define set_power_wells(power_domains, __power_wells) ({ \ 6593#define set_power_wells(power_domains, __power_wells) ({ \
6246 (power_domains)->power_wells = (__power_wells); \ 6594 (power_domains)->power_wells = (__power_wells); \
6247 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ 6595 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -6292,11 +6640,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
6292 mutex_unlock(&power_domains->lock); 6640 mutex_unlock(&power_domains->lock);
6293} 6641}
6294 6642
6643static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
6644{
6645 struct i915_power_well *cmn =
6646 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
6647 struct i915_power_well *disp2d =
6648 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
6649
6650 /* nothing to do if common lane is already off */
6651 if (!cmn->ops->is_enabled(dev_priv, cmn))
6652 return;
6653
6654 /* If the display might be already active skip this */
6655 if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
6656 I915_READ(DPIO_CTL) & DPIO_CMNRST)
6657 return;
6658
6659 DRM_DEBUG_KMS("toggling display PHY side reset\n");
6660
6661 /* cmnlane needs DPLL registers */
6662 disp2d->ops->enable(dev_priv, disp2d);
6663
6664 /*
6665 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
6666 * Need to assert and de-assert PHY SB reset by gating the
6667 * common lane power, then un-gating it.
6668 * Simply ungating isn't enough to reset the PHY enough to get
6669 * ports and lanes running.
6670 */
6671 cmn->ops->disable(dev_priv, cmn);
6672}
6673
6295void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) 6674void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
6296{ 6675{
6676 struct drm_device *dev = dev_priv->dev;
6297 struct i915_power_domains *power_domains = &dev_priv->power_domains; 6677 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6298 6678
6299 power_domains->initializing = true; 6679 power_domains->initializing = true;
6680
6681 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
6682 mutex_lock(&power_domains->lock);
6683 vlv_cmnlane_wa(dev_priv);
6684 mutex_unlock(&power_domains->lock);
6685 }
6686
6300 /* For now, we need the power well to be always enabled. */ 6687 /* For now, we need the power well to be always enabled. */
6301 intel_display_set_init_power(dev_priv, true); 6688 intel_display_set_init_power(dev_priv, true);
6302 intel_power_domains_resume(dev_priv); 6689 intel_power_domains_resume(dev_priv);
@@ -6469,7 +6856,7 @@ void intel_init_pm(struct drm_device *dev)
6469 (dev_priv->is_ddr3 == 1) ? "3" : "2", 6856 (dev_priv->is_ddr3 == 1) ? "3" : "2",
6470 dev_priv->fsb_freq, dev_priv->mem_freq); 6857 dev_priv->fsb_freq, dev_priv->mem_freq);
6471 /* Disable CxSR and never update its watermark again */ 6858 /* Disable CxSR and never update its watermark again */
6472 pineview_disable_cxsr(dev); 6859 intel_set_memory_cxsr(dev_priv, false);
6473 dev_priv->display.update_wm = NULL; 6860 dev_priv->display.update_wm = NULL;
6474 } else 6861 } else
6475 dev_priv->display.update_wm = pineview_update_wm; 6862 dev_priv->display.update_wm = pineview_update_wm;
@@ -6552,7 +6939,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
6552 return 0; 6939 return 0;
6553} 6940}
6554 6941
6555int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) 6942static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
6556{ 6943{
6557 int div; 6944 int div;
6558 6945
@@ -6574,7 +6961,7 @@ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
6574 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); 6961 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
6575} 6962}
6576 6963
6577int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) 6964static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
6578{ 6965{
6579 int mul; 6966 int mul;
6580 6967
@@ -6596,6 +6983,80 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
6596 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; 6983 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
6597} 6984}
6598 6985
6986static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
6987{
6988 int div, freq;
6989
6990 switch (dev_priv->rps.cz_freq) {
6991 case 200:
6992 div = 5;
6993 break;
6994 case 267:
6995 div = 6;
6996 break;
6997 case 320:
6998 case 333:
6999 case 400:
7000 div = 8;
7001 break;
7002 default:
7003 return -1;
7004 }
7005
7006 freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
7007
7008 return freq;
7009}
7010
7011static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7012{
7013 int mul, opcode;
7014
7015 switch (dev_priv->rps.cz_freq) {
7016 case 200:
7017 mul = 5;
7018 break;
7019 case 267:
7020 mul = 6;
7021 break;
7022 case 320:
7023 case 333:
7024 case 400:
7025 mul = 8;
7026 break;
7027 default:
7028 return -1;
7029 }
7030
7031 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
7032
7033 return opcode;
7034}
7035
7036int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7037{
7038 int ret = -1;
7039
7040 if (IS_CHERRYVIEW(dev_priv->dev))
7041 ret = chv_gpu_freq(dev_priv, val);
7042 else if (IS_VALLEYVIEW(dev_priv->dev))
7043 ret = byt_gpu_freq(dev_priv, val);
7044
7045 return ret;
7046}
7047
7048int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7049{
7050 int ret = -1;
7051
7052 if (IS_CHERRYVIEW(dev_priv->dev))
7053 ret = chv_freq_opcode(dev_priv, val);
7054 else if (IS_VALLEYVIEW(dev_priv->dev))
7055 ret = byt_freq_opcode(dev_priv, val);
7056
7057 return ret;
7058}
7059
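The CHV conversions above derive the divider from the fused CZ clock rather than the BYT mem_freq formula, and chv_gpu_freq()/chv_freq_opcode() are exact inverses at in-range operating points. One worked round trip with cz_freq = 400 (so div = mul = 8):

/* chv_gpu_freq(16):     DIV_ROUND_CLOSEST(400 * 16, 2 * 8) / 2
 *                     = DIV_ROUND_CLOSEST(6400, 16) / 2
 *                     = 400 / 2 = 200 MHz
 *
 * chv_freq_opcode(200): DIV_ROUND_CLOSEST(200 * 2 * 8, 400) * 2
 *                     = DIV_ROUND_CLOSEST(3200, 400) * 2
 *                     = 8 * 2 = 16
 */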
6599void intel_pm_setup(struct drm_device *dev) 7060void intel_pm_setup(struct drm_device *dev)
6600{ 7061{
6601 struct drm_i915_private *dev_priv = dev->dev_private; 7062 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6606,5 +7067,5 @@ void intel_pm_setup(struct drm_device *dev)
6606 intel_gen6_powersave_work); 7067 intel_gen6_powersave_work);
6607 7068
6608 dev_priv->pm.suspended = false; 7069 dev_priv->pm.suspended = false;
6609 dev_priv->pm.irqs_disabled = false; 7070 dev_priv->pm._irqs_disabled = false;
6610} 7071}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index a5e783a9928a..fd4f66231d30 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -28,7 +28,6 @@
28 28
29struct intel_renderstate_rodata { 29struct intel_renderstate_rodata {
30 const u32 *reloc; 30 const u32 *reloc;
31 const u32 reloc_items;
32 const u32 *batch; 31 const u32 *batch;
33 const u32 batch_items; 32 const u32 batch_items;
34}; 33};
@@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state;
40#define RO_RENDERSTATE(_g) \ 39#define RO_RENDERSTATE(_g) \
41 const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ 40 const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
42 .reloc = gen ## _g ## _null_state_relocs, \ 41 .reloc = gen ## _g ## _null_state_relocs, \
43 .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
44 .batch = gen ## _g ## _null_state_batch, \ 42 .batch = gen ## _g ## _null_state_batch, \
45 .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \ 43 .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
46 } 44 }
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
index 740538ad0977..56c1429d8a60 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = {
6 0x0000002c, 6 0x0000002c,
7 0x000001e0, 7 0x000001e0,
8 0x000001e4, 8 0x000001e4,
9 -1,
9}; 10};
10 11
11static const u32 gen6_null_state_batch[] = { 12static const u32 gen6_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
index 6fa7ff2a1298..419e35a7b0ff 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = {
5 0x00000010, 5 0x00000010,
6 0x00000018, 6 0x00000018,
7 0x000001ec, 7 0x000001ec,
8 -1,
8}; 9};
9 10
10static const u32 gen7_null_state_batch[] = { 11static const u32 gen7_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 5c875615d42a..75ef1b5de45c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = {
5 0x00000050, 5 0x00000050,
6 0x00000060, 6 0x00000060,
7 0x000003ec, 7 0x000003ec,
8 -1,
8}; 9};
9 10
10static const u32 gen8_null_state_batch[] = { 11static const u32 gen8_null_state_batch[] = {
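Across the three renderstate tables, the explicit reloc_items count is dropped in favor of a -1 terminator appended to each relocs array, so consumers can walk the list until they hit the sentinel. A minimal sketch of such a walker (the function name is hypothetical; the sentinel is the one added above):

static void example_walk_relocs(const u32 *reloc)
{
	while (*reloc != (u32)-1) {	/* -1 terminates the list */
		/* ... apply one relocation at offset *reloc ... */
		reloc++;
	}
}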
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 279488addf3f..b3d8f766fa7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size)
48 return space; 48 return space;
49} 49}
50 50
51static inline int ring_space(struct intel_engine_cs *ring) 51static inline int ring_space(struct intel_ringbuffer *ringbuf)
52{ 52{
53 struct intel_ringbuffer *ringbuf = ring->buffer;
54 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); 53 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
55} 54}
56 55
@@ -545,7 +544,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
545 else { 544 else {
546 ringbuf->head = I915_READ_HEAD(ring); 545 ringbuf->head = I915_READ_HEAD(ring);
547 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 546 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
548 ringbuf->space = ring_space(ring); 547 ringbuf->space = ring_space(ringbuf);
549 ringbuf->last_retired_head = -1; 548 ringbuf->last_retired_head = -1;
550 } 549 }
551 550
@@ -604,6 +603,8 @@ static int init_render_ring(struct intel_engine_cs *ring)
604 struct drm_device *dev = ring->dev; 603 struct drm_device *dev = ring->dev;
605 struct drm_i915_private *dev_priv = dev->dev_private; 604 struct drm_i915_private *dev_priv = dev->dev_private;
606 int ret = init_ring_common(ring); 605 int ret = init_ring_common(ring);
606 if (ret)
607 return ret;
607 608
608 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 609 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
609 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) 610 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
@@ -658,6 +659,13 @@ static int init_render_ring(struct intel_engine_cs *ring)
658static void render_ring_cleanup(struct intel_engine_cs *ring) 659static void render_ring_cleanup(struct intel_engine_cs *ring)
659{ 660{
660 struct drm_device *dev = ring->dev; 661 struct drm_device *dev = ring->dev;
662 struct drm_i915_private *dev_priv = dev->dev_private;
663
664 if (dev_priv->semaphore_obj) {
665 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
666 drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
667 dev_priv->semaphore_obj = NULL;
668 }
661 669
662 if (ring->scratch.obj == NULL) 670 if (ring->scratch.obj == NULL)
663 return; 671 return;
@@ -671,29 +679,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
671 ring->scratch.obj = NULL; 679 ring->scratch.obj = NULL;
672} 680}
673 681
682static int gen8_rcs_signal(struct intel_engine_cs *signaller,
683 unsigned int num_dwords)
684{
685#define MBOX_UPDATE_DWORDS 8
686 struct drm_device *dev = signaller->dev;
687 struct drm_i915_private *dev_priv = dev->dev_private;
688 struct intel_engine_cs *waiter;
689 int i, ret, num_rings;
690
691 num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
692 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
693#undef MBOX_UPDATE_DWORDS
694
695 ret = intel_ring_begin(signaller, num_dwords);
696 if (ret)
697 return ret;
698
699 for_each_ring(waiter, dev_priv, i) {
700 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
701 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
702 continue;
703
704 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
705 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
706 PIPE_CONTROL_QW_WRITE |
707 PIPE_CONTROL_FLUSH_ENABLE);
708 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
709 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
710 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
711 intel_ring_emit(signaller, 0);
712 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
713 MI_SEMAPHORE_TARGET(waiter->id));
714 intel_ring_emit(signaller, 0);
715 }
716
717 return 0;
718}
719
720static int gen8_xcs_signal(struct intel_engine_cs *signaller,
721 unsigned int num_dwords)
722{
723#define MBOX_UPDATE_DWORDS 6
724 struct drm_device *dev = signaller->dev;
725 struct drm_i915_private *dev_priv = dev->dev_private;
726 struct intel_engine_cs *waiter;
727 int i, ret, num_rings;
728
729 num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
730 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
731#undef MBOX_UPDATE_DWORDS
732
733 ret = intel_ring_begin(signaller, num_dwords);
734 if (ret)
735 return ret;
736
737 for_each_ring(waiter, dev_priv, i) {
738 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
739 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
740 continue;
741
742 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
743 MI_FLUSH_DW_OP_STOREDW);
744 intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
745 MI_FLUSH_DW_USE_GTT);
746 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
747 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
748 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
749 MI_SEMAPHORE_TARGET(waiter->id));
750 intel_ring_emit(signaller, 0);
751 }
752
753 return 0;
754}
755
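[ Ed. The two gen8 signal paths above differ only in the cost of one
mailbox update: the render ring emits a 6-dword PIPE_CONTROL plus the
2-dword MI_SEMAPHORE_SIGNAL (8 dwords), the other engines a 4-dword
MI_FLUSH_DW plus the same 2-dword signal (6 dwords). A sketch of the
reservation math both functions perform, helper name hypothetical: ]

	/* Space reserved by gen8_{rcs,xcs}_signal(): one mailbox update
	 * per peer engine, appended to the caller's own request.
	 */
	static unsigned int gen8_signal_dwords(unsigned int num_dwords,
					       int num_rings, bool is_rcs)
	{
		const unsigned int per_mbox = is_rcs ? 8 : 6;

		return num_dwords + (num_rings - 1) * per_mbox;
	}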
674static int gen6_signal(struct intel_engine_cs *signaller, 756static int gen6_signal(struct intel_engine_cs *signaller,
675 unsigned int num_dwords) 757 unsigned int num_dwords)
676{ 758{
677 struct drm_device *dev = signaller->dev; 759 struct drm_device *dev = signaller->dev;
678 struct drm_i915_private *dev_priv = dev->dev_private; 760 struct drm_i915_private *dev_priv = dev->dev_private;
679 struct intel_engine_cs *useless; 761 struct intel_engine_cs *useless;
680 int i, ret; 762 int i, ret, num_rings;
681 763
682 /* NB: In order to be able to do semaphore MBOX updates for varying 764#define MBOX_UPDATE_DWORDS 3
683 * number of rings, it's easiest if we round up each individual update 765 num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
684 * to a multiple of 2 (since ring updates must always be a multiple of 766 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
685 * 2) even though the actual update only requires 3 dwords. 767#undef MBOX_UPDATE_DWORDS
686 */
687#define MBOX_UPDATE_DWORDS 4
688 if (i915_semaphore_is_enabled(dev))
689 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
690 else
691 return intel_ring_begin(signaller, num_dwords);
692 768
693 ret = intel_ring_begin(signaller, num_dwords); 769 ret = intel_ring_begin(signaller, num_dwords);
694 if (ret) 770 if (ret)
695 return ret; 771 return ret;
696#undef MBOX_UPDATE_DWORDS
697 772
698 for_each_ring(useless, dev_priv, i) { 773 for_each_ring(useless, dev_priv, i) {
699 u32 mbox_reg = signaller->semaphore.mbox.signal[i]; 774 u32 mbox_reg = signaller->semaphore.mbox.signal[i];
@@ -701,15 +776,13 @@ static int gen6_signal(struct intel_engine_cs *signaller,
701 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); 776 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
702 intel_ring_emit(signaller, mbox_reg); 777 intel_ring_emit(signaller, mbox_reg);
703 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); 778 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
704 intel_ring_emit(signaller, MI_NOOP);
705 } else {
706 intel_ring_emit(signaller, MI_NOOP);
707 intel_ring_emit(signaller, MI_NOOP);
708 intel_ring_emit(signaller, MI_NOOP);
709 intel_ring_emit(signaller, MI_NOOP);
710 } 779 }
711 } 780 }
712 781
782 /* If num_dwords was rounded, make sure the tail pointer is correct */
783 if (num_rings % 2 == 0)
784 intel_ring_emit(signaller, MI_NOOP);
785
713 return 0; 786 return 0;
714} 787}
715 788
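[ Ed. gen6_signal() now reserves exactly 3 dwords per mailbox
(MI_LOAD_REGISTER_IMM(1), the register, the seqno) and rounds the total
up to a multiple of 2, padding with at most one trailing MI_NOOP,
instead of padding every update out to 4 dwords as before. Worked
example with four rings: 4 + round_up(3 * 3, 2) = 4 + 10 = 14 dwords,
and since num_rings is even the one spare slot is filled with the final
MI_NOOP. A sketch of that accounting, helper name hypothetical: ]

	static unsigned int gen6_signal_dwords(unsigned int num_dwords,
					       int num_rings)
	{
		/* 3 dwords per mailbox: MI_LOAD_REGISTER_IMM(1), reg, seqno */
		unsigned int mbox = (num_rings - 1) * 3;

		/* ring emissions must total a multiple of 2; the odd
		 * dword, when present, is later filled with MI_NOOP */
		return num_dwords + round_up(mbox, 2);
	}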
@@ -727,7 +800,11 @@ gen6_add_request(struct intel_engine_cs *ring)
727{ 800{
728 int ret; 801 int ret;
729 802
730 ret = ring->semaphore.signal(ring, 4); 803 if (ring->semaphore.signal)
804 ret = ring->semaphore.signal(ring, 4);
805 else
806 ret = intel_ring_begin(ring, 4);
807
731 if (ret) 808 if (ret)
732 return ret; 809 return ret;
733 810
@@ -754,6 +831,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
754 * @signaller - ring which has, or will signal 831 * @signaller - ring which has, or will signal
755 * @seqno - seqno which the waiter will block on 832 * @seqno - seqno which the waiter will block on
756 */ 833 */
834
835static int
836gen8_ring_sync(struct intel_engine_cs *waiter,
837 struct intel_engine_cs *signaller,
838 u32 seqno)
839{
840 struct drm_i915_private *dev_priv = waiter->dev->dev_private;
841 int ret;
842
843 ret = intel_ring_begin(waiter, 4);
844 if (ret)
845 return ret;
846
847 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
848 MI_SEMAPHORE_GLOBAL_GTT |
849 MI_SEMAPHORE_POLL |
850 MI_SEMAPHORE_SAD_GTE_SDD);
851 intel_ring_emit(waiter, seqno);
852 intel_ring_emit(waiter,
853 lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
854 intel_ring_emit(waiter,
855 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
856 intel_ring_advance(waiter);
857 return 0;
858}
859
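[ Ed. MI_SEMAPHORE_WAIT with MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD
parks the waiting engine until the qword at the given GGTT address (the
signaller's slot in the shared semaphore page, see GEN8_WAIT_OFFSET in
intel_ringbuffer.h below) is greater than or equal to the seqno embedded
in the command. In CPU terms the wait amounts to roughly this, sketched
with a hypothetical name: ]

	/* The real poll happens in the command streamer, not on the CPU. */
	static void gen8_sync_sketch(volatile u64 *semaphore_slot, u32 seqno)
	{
		while (*semaphore_slot < seqno)
			cpu_relax();
	}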
757static int 860static int
758gen6_ring_sync(struct intel_engine_cs *waiter, 861gen6_ring_sync(struct intel_engine_cs *waiter,
759 struct intel_engine_cs *signaller, 862 struct intel_engine_cs *signaller,
@@ -901,7 +1004,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
901 1004
902 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1005 spin_lock_irqsave(&dev_priv->irq_lock, flags);
903 if (ring->irq_refcount++ == 0) 1006 if (ring->irq_refcount++ == 0)
904 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1007 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
905 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1008 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
906 1009
907 return true; 1010 return true;
@@ -916,7 +1019,7 @@ gen5_ring_put_irq(struct intel_engine_cs *ring)
916 1019
917 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1020 spin_lock_irqsave(&dev_priv->irq_lock, flags);
918 if (--ring->irq_refcount == 0) 1021 if (--ring->irq_refcount == 0)
919 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1022 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
920 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1023 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
921} 1024}
922 1025
@@ -1109,7 +1212,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
1109 GT_PARITY_ERROR(dev))); 1212 GT_PARITY_ERROR(dev)));
1110 else 1213 else
1111 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1214 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1112 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1215 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1113 } 1216 }
1114 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1217 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1115 1218
@@ -1129,7 +1232,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring)
1129 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 1232 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1130 else 1233 else
1131 I915_WRITE_IMR(ring, ~0); 1234 I915_WRITE_IMR(ring, ~0);
1132 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1235 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1133 } 1236 }
1134 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1237 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1135} 1238}
@@ -1147,7 +1250,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
1147 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1250 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1148 if (ring->irq_refcount++ == 0) { 1251 if (ring->irq_refcount++ == 0) {
1149 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1252 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1150 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); 1253 gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1151 } 1254 }
1152 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1255 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1153 1256
@@ -1167,7 +1270,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
1167 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1270 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1168 if (--ring->irq_refcount == 0) { 1271 if (--ring->irq_refcount == 0) {
1169 I915_WRITE_IMR(ring, ~0); 1272 I915_WRITE_IMR(ring, ~0);
1170 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); 1273 gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1171 } 1274 }
1172 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1275 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1173} 1276}
@@ -1329,6 +1432,7 @@ static int init_status_page(struct intel_engine_cs *ring)
1329 struct drm_i915_gem_object *obj; 1432 struct drm_i915_gem_object *obj;
1330 1433
1331 if ((obj = ring->status_page.obj) == NULL) { 1434 if ((obj = ring->status_page.obj) == NULL) {
1435 unsigned flags;
1332 int ret; 1436 int ret;
1333 1437
1334 obj = i915_gem_alloc_object(ring->dev, 4096); 1438 obj = i915_gem_alloc_object(ring->dev, 4096);
@@ -1341,7 +1445,20 @@ static int init_status_page(struct intel_engine_cs *ring)
1341 if (ret) 1445 if (ret)
1342 goto err_unref; 1446 goto err_unref;
1343 1447
1344 ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); 1448 flags = 0;
1449 if (!HAS_LLC(ring->dev))
1450 /* On g33, we cannot place HWS above 256MiB, so
1451 * restrict its pinning to the low mappable arena.
1452 * Though this restriction is not documented for
1453 * gen4, gen5, or byt, they also behave similarly
1454 * and hang if the HWS is placed at the top of the
1455 * GTT. To generalise, it appears that all !llc
1456 * platforms have issues with us placing the HWS
1457 * above the mappable region (even though we never
1458		 * actually map it).
1459 */
1460 flags |= PIN_MAPPABLE;
1461 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
1345 if (ret) { 1462 if (ret) {
1346err_unref: 1463err_unref:
1347 drm_gem_object_unreference(&obj->base); 1464 drm_gem_object_unreference(&obj->base);
@@ -1378,15 +1495,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
1378 return 0; 1495 return 0;
1379} 1496}
1380 1497
1381static int allocate_ring_buffer(struct intel_engine_cs *ring) 1498static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1499{
1500 if (!ringbuf->obj)
1501 return;
1502
1503 iounmap(ringbuf->virtual_start);
1504 i915_gem_object_ggtt_unpin(ringbuf->obj);
1505 drm_gem_object_unreference(&ringbuf->obj->base);
1506 ringbuf->obj = NULL;
1507}
1508
1509static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1510 struct intel_ringbuffer *ringbuf)
1382{ 1511{
1383 struct drm_device *dev = ring->dev;
1384 struct drm_i915_private *dev_priv = to_i915(dev); 1512 struct drm_i915_private *dev_priv = to_i915(dev);
1385 struct intel_ringbuffer *ringbuf = ring->buffer;
1386 struct drm_i915_gem_object *obj; 1513 struct drm_i915_gem_object *obj;
1387 int ret; 1514 int ret;
1388 1515
1389 if (intel_ring_initialized(ring)) 1516 if (ringbuf->obj)
1390 return 0; 1517 return 0;
1391 1518
1392 obj = NULL; 1519 obj = NULL;
@@ -1397,6 +1524,9 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
1397 if (obj == NULL) 1524 if (obj == NULL)
1398 return -ENOMEM; 1525 return -ENOMEM;
1399 1526
1527 /* mark ring buffers as read-only from GPU side by default */
1528 obj->gt_ro = 1;
1529
1400 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 1530 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1401 if (ret) 1531 if (ret)
1402 goto err_unref; 1532 goto err_unref;
@@ -1455,7 +1585,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1455 goto error; 1585 goto error;
1456 } 1586 }
1457 1587
1458 ret = allocate_ring_buffer(ring); 1588 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1459 if (ret) { 1589 if (ret) {
1460 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); 1590 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
1461 goto error; 1591 goto error;
@@ -1496,11 +1626,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1496 intel_stop_ring_buffer(ring); 1626 intel_stop_ring_buffer(ring);
1497 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); 1627 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
1498 1628
1499 iounmap(ringbuf->virtual_start); 1629 intel_destroy_ringbuffer_obj(ringbuf);
1500
1501 i915_gem_object_ggtt_unpin(ringbuf->obj);
1502 drm_gem_object_unreference(&ringbuf->obj->base);
1503 ringbuf->obj = NULL;
1504 ring->preallocated_lazy_request = NULL; 1630 ring->preallocated_lazy_request = NULL;
1505 ring->outstanding_lazy_seqno = 0; 1631 ring->outstanding_lazy_seqno = 0;
1506 1632
@@ -1526,7 +1652,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1526 ringbuf->head = ringbuf->last_retired_head; 1652 ringbuf->head = ringbuf->last_retired_head;
1527 ringbuf->last_retired_head = -1; 1653 ringbuf->last_retired_head = -1;
1528 1654
1529 ringbuf->space = ring_space(ring); 1655 ringbuf->space = ring_space(ringbuf);
1530 if (ringbuf->space >= n) 1656 if (ringbuf->space >= n)
1531 return 0; 1657 return 0;
1532 } 1658 }
@@ -1549,7 +1675,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1549 ringbuf->head = ringbuf->last_retired_head; 1675 ringbuf->head = ringbuf->last_retired_head;
1550 ringbuf->last_retired_head = -1; 1676 ringbuf->last_retired_head = -1;
1551 1677
1552 ringbuf->space = ring_space(ring); 1678 ringbuf->space = ring_space(ringbuf);
1553 return 0; 1679 return 0;
1554} 1680}
1555 1681
@@ -1578,7 +1704,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1578 trace_i915_ring_wait_begin(ring); 1704 trace_i915_ring_wait_begin(ring);
1579 do { 1705 do {
1580 ringbuf->head = I915_READ_HEAD(ring); 1706 ringbuf->head = I915_READ_HEAD(ring);
1581 ringbuf->space = ring_space(ring); 1707 ringbuf->space = ring_space(ringbuf);
1582 if (ringbuf->space >= n) { 1708 if (ringbuf->space >= n) {
1583 ret = 0; 1709 ret = 0;
1584 break; 1710 break;
@@ -1630,7 +1756,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1630 iowrite32(MI_NOOP, virt++); 1756 iowrite32(MI_NOOP, virt++);
1631 1757
1632 ringbuf->tail = 0; 1758 ringbuf->tail = 0;
1633 ringbuf->space = ring_space(ring); 1759 ringbuf->space = ring_space(ringbuf);
1634 1760
1635 return 0; 1761 return 0;
1636} 1762}
@@ -1746,14 +1872,15 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
1746 1872
1747void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) 1873void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
1748{ 1874{
1749 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1875 struct drm_device *dev = ring->dev;
1876 struct drm_i915_private *dev_priv = dev->dev_private;
1750 1877
1751 BUG_ON(ring->outstanding_lazy_seqno); 1878 BUG_ON(ring->outstanding_lazy_seqno);
1752 1879
1753 if (INTEL_INFO(ring->dev)->gen >= 6) { 1880 if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
1754 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 1881 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1755 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); 1882 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1756 if (HAS_VEBOX(ring->dev)) 1883 if (HAS_VEBOX(dev))
1757 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); 1884 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1758 } 1885 }
1759 1886
@@ -1941,45 +2068,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1941{ 2068{
1942 struct drm_i915_private *dev_priv = dev->dev_private; 2069 struct drm_i915_private *dev_priv = dev->dev_private;
1943 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; 2070 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2071 struct drm_i915_gem_object *obj;
2072 int ret;
1944 2073
1945 ring->name = "render ring"; 2074 ring->name = "render ring";
1946 ring->id = RCS; 2075 ring->id = RCS;
1947 ring->mmio_base = RENDER_RING_BASE; 2076 ring->mmio_base = RENDER_RING_BASE;
1948 2077
1949 if (INTEL_INFO(dev)->gen >= 6) { 2078 if (INTEL_INFO(dev)->gen >= 8) {
2079 if (i915_semaphore_is_enabled(dev)) {
2080 obj = i915_gem_alloc_object(dev, 4096);
2081 if (obj == NULL) {
2082 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2083 i915.semaphores = 0;
2084 } else {
2085 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2086 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2087 if (ret != 0) {
2088 drm_gem_object_unreference(&obj->base);
2089 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2090 i915.semaphores = 0;
2091 } else
2092 dev_priv->semaphore_obj = obj;
2093 }
2094 }
2095 ring->add_request = gen6_add_request;
2096 ring->flush = gen8_render_ring_flush;
2097 ring->irq_get = gen8_ring_get_irq;
2098 ring->irq_put = gen8_ring_put_irq;
2099 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2100 ring->get_seqno = gen6_ring_get_seqno;
2101 ring->set_seqno = ring_set_seqno;
2102 if (i915_semaphore_is_enabled(dev)) {
2103 WARN_ON(!dev_priv->semaphore_obj);
2104 ring->semaphore.sync_to = gen8_ring_sync;
2105 ring->semaphore.signal = gen8_rcs_signal;
2106 GEN8_RING_SEMAPHORE_INIT;
2107 }
2108 } else if (INTEL_INFO(dev)->gen >= 6) {
1950 ring->add_request = gen6_add_request; 2109 ring->add_request = gen6_add_request;
1951 ring->flush = gen7_render_ring_flush; 2110 ring->flush = gen7_render_ring_flush;
1952 if (INTEL_INFO(dev)->gen == 6) 2111 if (INTEL_INFO(dev)->gen == 6)
1953 ring->flush = gen6_render_ring_flush; 2112 ring->flush = gen6_render_ring_flush;
1954 if (INTEL_INFO(dev)->gen >= 8) { 2113 ring->irq_get = gen6_ring_get_irq;
1955 ring->flush = gen8_render_ring_flush; 2114 ring->irq_put = gen6_ring_put_irq;
1956 ring->irq_get = gen8_ring_get_irq;
1957 ring->irq_put = gen8_ring_put_irq;
1958 } else {
1959 ring->irq_get = gen6_ring_get_irq;
1960 ring->irq_put = gen6_ring_put_irq;
1961 }
1962 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2115 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1963 ring->get_seqno = gen6_ring_get_seqno; 2116 ring->get_seqno = gen6_ring_get_seqno;
1964 ring->set_seqno = ring_set_seqno; 2117 ring->set_seqno = ring_set_seqno;
1965 ring->semaphore.sync_to = gen6_ring_sync; 2118 if (i915_semaphore_is_enabled(dev)) {
1966 ring->semaphore.signal = gen6_signal; 2119 ring->semaphore.sync_to = gen6_ring_sync;
1967 /* 2120 ring->semaphore.signal = gen6_signal;
1968 * The current semaphore is only applied on pre-gen8 platform. 2121 /*
1969 * And there is no VCS2 ring on the pre-gen8 platform. So the 2122 * The current semaphore is only applied on pre-gen8
1970 * semaphore between RCS and VCS2 is initialized as INVALID. 2123 * platform. And there is no VCS2 ring on the pre-gen8
1971 * Gen8 will initialize the sema between VCS2 and RCS later. 2124 * platform. So the semaphore between RCS and VCS2 is
1972 */ 2125 * initialized as INVALID. Gen8 will initialize the
1973 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; 2126 * sema between VCS2 and RCS later.
1974 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; 2127 */
1975 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; 2128 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1976 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; 2129 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
1977 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; 2130 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
1978 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; 2131 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
1979 ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; 2132 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
1980 ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; 2133 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
1981 ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; 2134 ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
1982 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; 2135 ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2136 ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2137 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2138 }
1983 } else if (IS_GEN5(dev)) { 2139 } else if (IS_GEN5(dev)) {
1984 ring->add_request = pc_render_add_request; 2140 ring->add_request = pc_render_add_request;
1985 ring->flush = gen4_render_ring_flush; 2141 ring->flush = gen4_render_ring_flush;
@@ -2007,6 +2163,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2007 ring->irq_enable_mask = I915_USER_INTERRUPT; 2163 ring->irq_enable_mask = I915_USER_INTERRUPT;
2008 } 2164 }
2009 ring->write_tail = ring_write_tail; 2165 ring->write_tail = ring_write_tail;
2166
2010 if (IS_HASWELL(dev)) 2167 if (IS_HASWELL(dev))
2011 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2168 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2012 else if (IS_GEN8(dev)) 2169 else if (IS_GEN8(dev))
@@ -2024,9 +2181,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2024 2181
2025 /* Workaround batchbuffer to combat CS tlb bug. */ 2182 /* Workaround batchbuffer to combat CS tlb bug. */
2026 if (HAS_BROKEN_CS_TLB(dev)) { 2183 if (HAS_BROKEN_CS_TLB(dev)) {
2027 struct drm_i915_gem_object *obj;
2028 int ret;
2029
2030 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); 2184 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
2031 if (obj == NULL) { 2185 if (obj == NULL) {
2032 DRM_ERROR("Failed to allocate batch bo\n"); 2186 DRM_ERROR("Failed to allocate batch bo\n");
@@ -2157,31 +2311,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2157 ring->irq_put = gen8_ring_put_irq; 2311 ring->irq_put = gen8_ring_put_irq;
2158 ring->dispatch_execbuffer = 2312 ring->dispatch_execbuffer =
2159 gen8_ring_dispatch_execbuffer; 2313 gen8_ring_dispatch_execbuffer;
2314 if (i915_semaphore_is_enabled(dev)) {
2315 ring->semaphore.sync_to = gen8_ring_sync;
2316 ring->semaphore.signal = gen8_xcs_signal;
2317 GEN8_RING_SEMAPHORE_INIT;
2318 }
2160 } else { 2319 } else {
2161 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2320 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2162 ring->irq_get = gen6_ring_get_irq; 2321 ring->irq_get = gen6_ring_get_irq;
2163 ring->irq_put = gen6_ring_put_irq; 2322 ring->irq_put = gen6_ring_put_irq;
2164 ring->dispatch_execbuffer = 2323 ring->dispatch_execbuffer =
2165 gen6_ring_dispatch_execbuffer; 2324 gen6_ring_dispatch_execbuffer;
2325 if (i915_semaphore_is_enabled(dev)) {
2326 ring->semaphore.sync_to = gen6_ring_sync;
2327 ring->semaphore.signal = gen6_signal;
2328 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2329 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2330 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2331 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2332 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2333 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2334 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2335 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2336 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2337 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2338 }
2166 } 2339 }
2167 ring->semaphore.sync_to = gen6_ring_sync;
2168 ring->semaphore.signal = gen6_signal;
2169 /*
2170 * The current semaphore is only applied on pre-gen8 platform.
2171 * And there is no VCS2 ring on the pre-gen8 platform. So the
2172 * semaphore between VCS and VCS2 is initialized as INVALID.
2173 * Gen8 will initialize the sema between VCS2 and VCS later.
2174 */
2175 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2176 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2177 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2178 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2179 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2180 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2181 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2182 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2183 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2184 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2185 } else { 2340 } else {
2186 ring->mmio_base = BSD_RING_BASE; 2341 ring->mmio_base = BSD_RING_BASE;
2187 ring->flush = bsd_ring_flush; 2342 ring->flush = bsd_ring_flush;
@@ -2218,7 +2373,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2218 return -EINVAL; 2373 return -EINVAL;
2219 } 2374 }
2220 2375
2221 ring->name = "bds2_ring"; 2376 ring->name = "bsd2 ring";
2222 ring->id = VCS2; 2377 ring->id = VCS2;
2223 2378
2224 ring->write_tail = ring_write_tail; 2379 ring->write_tail = ring_write_tail;
@@ -2233,25 +2388,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2233 ring->irq_put = gen8_ring_put_irq; 2388 ring->irq_put = gen8_ring_put_irq;
2234 ring->dispatch_execbuffer = 2389 ring->dispatch_execbuffer =
2235 gen8_ring_dispatch_execbuffer; 2390 gen8_ring_dispatch_execbuffer;
2236 ring->semaphore.sync_to = gen6_ring_sync; 2391 if (i915_semaphore_is_enabled(dev)) {
2237 ring->semaphore.signal = gen6_signal; 2392 ring->semaphore.sync_to = gen8_ring_sync;
2238 /* 2393 ring->semaphore.signal = gen8_xcs_signal;
2239 * The current semaphore is only applied on the pre-gen8. And there 2394 GEN8_RING_SEMAPHORE_INIT;
2240 * is no bsd2 ring on the pre-gen8. So now the semaphore_register 2395 }
2241 * between VCS2 and other ring is initialized as invalid.
2242 * Gen8 will initialize the sema between VCS2 and other ring later.
2243 */
2244 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2245 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2246 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2247 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2248 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2249 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2250 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2251 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2252 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2253 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2254
2255 ring->init = init_ring_common; 2396 ring->init = init_ring_common;
2256 2397
2257 return intel_init_ring_buffer(dev, ring); 2398 return intel_init_ring_buffer(dev, ring);
@@ -2277,30 +2418,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
2277 ring->irq_get = gen8_ring_get_irq; 2418 ring->irq_get = gen8_ring_get_irq;
2278 ring->irq_put = gen8_ring_put_irq; 2419 ring->irq_put = gen8_ring_put_irq;
2279 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2420 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2421 if (i915_semaphore_is_enabled(dev)) {
2422 ring->semaphore.sync_to = gen8_ring_sync;
2423 ring->semaphore.signal = gen8_xcs_signal;
2424 GEN8_RING_SEMAPHORE_INIT;
2425 }
2280 } else { 2426 } else {
2281 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2427 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2282 ring->irq_get = gen6_ring_get_irq; 2428 ring->irq_get = gen6_ring_get_irq;
2283 ring->irq_put = gen6_ring_put_irq; 2429 ring->irq_put = gen6_ring_put_irq;
2284 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2430 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2431 if (i915_semaphore_is_enabled(dev)) {
2432 ring->semaphore.signal = gen6_signal;
2433 ring->semaphore.sync_to = gen6_ring_sync;
2434 /*
2435 * The current semaphore is only applied on pre-gen8
2436 * platform. And there is no VCS2 ring on the pre-gen8
2437 * platform. So the semaphore between BCS and VCS2 is
2438 * initialized as INVALID. Gen8 will initialize the
2439 * sema between BCS and VCS2 later.
2440 */
2441 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2442 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2443 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2444 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2445 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2446 ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2447 ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2448 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2449 ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2450 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2451 }
2285 } 2452 }
2286 ring->semaphore.sync_to = gen6_ring_sync;
2287 ring->semaphore.signal = gen6_signal;
2288 /*
2289 * The current semaphore is only applied on pre-gen8 platform. And
2290 * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
2291 * between BCS and VCS2 is initialized as INVALID.
2292 * Gen8 will initialize the sema between BCS and VCS2 later.
2293 */
2294 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2295 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2296 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2297 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2298 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2299 ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2300 ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2301 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2302 ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2303 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2304 ring->init = init_ring_common; 2453 ring->init = init_ring_common;
2305 2454
2306 return intel_init_ring_buffer(dev, ring); 2455 return intel_init_ring_buffer(dev, ring);
@@ -2327,24 +2476,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
2327 ring->irq_get = gen8_ring_get_irq; 2476 ring->irq_get = gen8_ring_get_irq;
2328 ring->irq_put = gen8_ring_put_irq; 2477 ring->irq_put = gen8_ring_put_irq;
2329 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; 2478 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2479 if (i915_semaphore_is_enabled(dev)) {
2480 ring->semaphore.sync_to = gen8_ring_sync;
2481 ring->semaphore.signal = gen8_xcs_signal;
2482 GEN8_RING_SEMAPHORE_INIT;
2483 }
2330 } else { 2484 } else {
2331 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 2485 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2332 ring->irq_get = hsw_vebox_get_irq; 2486 ring->irq_get = hsw_vebox_get_irq;
2333 ring->irq_put = hsw_vebox_put_irq; 2487 ring->irq_put = hsw_vebox_put_irq;
2334 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 2488 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2489 if (i915_semaphore_is_enabled(dev)) {
2490 ring->semaphore.sync_to = gen6_ring_sync;
2491 ring->semaphore.signal = gen6_signal;
2492 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2493 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2494 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2495 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2496 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2497 ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2498 ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2499 ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2500 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2501 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2502 }
2335 } 2503 }
2336 ring->semaphore.sync_to = gen6_ring_sync;
2337 ring->semaphore.signal = gen6_signal;
2338 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2339 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2340 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2341 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2342 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2343 ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2344 ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2345 ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2346 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2347 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2348 ring->init = init_ring_common; 2504 ring->init = init_ring_common;
2349 2505
2350 return intel_init_ring_buffer(dev, ring); 2506 return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e72017bdcd7f..ed5941078f92 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -40,6 +40,32 @@ struct intel_hw_status_page {
40#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) 40#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
41#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) 41#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
42 42
43/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
44 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
45 */
46#define i915_semaphore_seqno_size sizeof(uint64_t)
47#define GEN8_SIGNAL_OFFSET(__ring, to) \
48 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
49 ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
50 (i915_semaphore_seqno_size * (to)))
51
52#define GEN8_WAIT_OFFSET(__ring, from) \
53 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
54 ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
55 (i915_semaphore_seqno_size * (__ring)->id))
56
57#define GEN8_RING_SEMAPHORE_INIT do { \
58 if (!dev_priv->semaphore_obj) { \
59 break; \
60 } \
61 ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
62 ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
63 ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
64 ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
65 ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
66 ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
67 } while(0)
68
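[ Ed. The semaphore bo is carved into an I915_NUM_RINGS x I915_NUM_RINGS
table of 8-byte slots. Worked example with 5 rings: VCS (id 1) signalling
BCS (id 2) writes at base + 1*5*8 + 2*8 = base + 0x38, and
GEN8_WAIT_OFFSET(BCS, VCS) yields the very same base + 0x38, matching the
0x38 entries in the signal/wait tables documented further down. A sketch
of the slot arithmetic, helper names hypothetical: ]

	static u64 gen8_signal_slot(u64 base, int signaller, int waiter)
	{
		return base + signaller * I915_NUM_RINGS * sizeof(u64)
			    + waiter * sizeof(u64);
	}

	static u64 gen8_wait_slot(u64 base, int waiter, int signaller)
	{
		/* same address: the wait table is the transpose of the
		 * signal table */
		return gen8_signal_slot(base, signaller, waiter);
	}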
43enum intel_ring_hangcheck_action { 69enum intel_ring_hangcheck_action {
44 HANGCHECK_IDLE = 0, 70 HANGCHECK_IDLE = 0,
45 HANGCHECK_WAIT, 71 HANGCHECK_WAIT,
@@ -127,15 +153,55 @@ struct intel_engine_cs {
127#define I915_DISPATCH_PINNED 0x2 153#define I915_DISPATCH_PINNED 0x2
128 void (*cleanup)(struct intel_engine_cs *ring); 154 void (*cleanup)(struct intel_engine_cs *ring);
129 155
156 /* GEN8 signal/wait table - never trust comments!
157 * signal to signal to signal to signal to signal to
158 * RCS VCS BCS VECS VCS2
159 * --------------------------------------------------------------------
160 * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
161 * |-------------------------------------------------------------------
162 * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
163 * |-------------------------------------------------------------------
164 * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
165 * |-------------------------------------------------------------------
166 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
167 * |-------------------------------------------------------------------
168 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
169 * |-------------------------------------------------------------------
170 *
171 * Generalization:
172 * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
173 * ie. transpose of g(x, y)
174 *
175 * sync from sync from sync from sync from sync from
176 * RCS VCS BCS VECS VCS2
177 * --------------------------------------------------------------------
178 * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
179 * |-------------------------------------------------------------------
180 * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
181 * |-------------------------------------------------------------------
182 * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
183 * |-------------------------------------------------------------------
184 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
185 * |-------------------------------------------------------------------
186 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
187 * |-------------------------------------------------------------------
188 *
189 * Generalization:
190 * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
191 * ie. transpose of f(x, y)
192 */
130 struct { 193 struct {
131 u32 sync_seqno[I915_NUM_RINGS-1]; 194 u32 sync_seqno[I915_NUM_RINGS-1];
132 195
133 struct { 196 union {
134 /* our mbox written by others */ 197 struct {
135 u32 wait[I915_NUM_RINGS]; 198 /* our mbox written by others */
136 /* mboxes this ring signals to */ 199 u32 wait[I915_NUM_RINGS];
137 u32 signal[I915_NUM_RINGS]; 200 /* mboxes this ring signals to */
138 } mbox; 201 u32 signal[I915_NUM_RINGS];
202 } mbox;
203 u64 signal_ggtt[I915_NUM_RINGS];
204 };
139 205
140 /* AKA wait() */ 206 /* AKA wait() */
141 int (*sync_to)(struct intel_engine_cs *ring, 207 int (*sync_to)(struct intel_engine_cs *ring,
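[ Ed. The union is safe because a ring only ever uses one of the two
schemes: gen6/7 signal through per-ring MMIO mailbox registers (u32
offsets), gen8 through GGTT addresses in the semaphore page (u64), and
the signal/sync_to vfuncs installed at init decide which view is live.
A sketch of the two views, hedged and with a hypothetical name: ]

	static u64 semaphore_target(struct intel_engine_cs *ring,
				    int other, bool gen8)
	{
		return gen8 ? ring->semaphore.signal_ggtt[other]
			    : ring->semaphore.mbox.signal[other];
	}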
@@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
238 int idx; 304 int idx;
239 305
240 /* 306 /*
241 * cs -> 0 = vcs, 1 = bcs 307 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
242 * vcs -> 0 = bcs, 1 = cs, 308 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
243	 * bcs -> 0 = cs, 1 = vcs.	309	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
310 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
311 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
244 */ 312 */
245 313
246 idx = (other - ring) - 1; 314 idx = (other - ring) - 1;
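[ Ed. The rewritten comment now enumerates all five rings. The snipped
remainder of this helper wraps the negative distances back into range;
assuming it still follows the pre-patch shape, the whole computation is: ]

	/* Sketch: e.g. ring = vcs (1), other = rcs (0) gives
	 * (0 - 1) - 1 = -2, wrapped by I915_NUM_RINGS (5) to 3,
	 * matching "vcs -> ... 3 = rcs" in the comment above.
	 */
	static int sync_index_sketch(int ring_id, int other_id)
	{
		int idx = (other_id - ring_id) - 1;

		if (idx < 0)
			idx += I915_NUM_RINGS;
		return idx;
	}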
@@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
318u64 intel_ring_get_active_head(struct intel_engine_cs *ring); 386u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
319void intel_ring_setup_status_page(struct intel_engine_cs *ring); 387void intel_ring_setup_status_page(struct intel_engine_cs *ring);
320 388
321static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring) 389static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
322{ 390{
323 return ring->buffer->tail; 391 return ringbuf->tail;
324} 392}
325 393
326static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring) 394static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 20375cc7f82d..9350edd6728d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2433,7 +2433,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2433 connector->base.unregister = intel_sdvo_connector_unregister; 2433 connector->base.unregister = intel_sdvo_connector_unregister;
2434 2434
2435 intel_connector_attach_encoder(&connector->base, &encoder->base); 2435 intel_connector_attach_encoder(&connector->base, &encoder->base);
2436 ret = drm_sysfs_connector_add(drm_connector); 2436 ret = drm_connector_register(drm_connector);
2437 if (ret < 0) 2437 if (ret < 0)
2438 goto err1; 2438 goto err1;
2439 2439
@@ -2446,7 +2446,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2446 return 0; 2446 return 0;
2447 2447
2448err2: 2448err2:
2449 drm_sysfs_connector_remove(drm_connector); 2449 drm_connector_unregister(drm_connector);
2450err1: 2450err1:
2451 drm_connector_cleanup(drm_connector); 2451 drm_connector_cleanup(drm_connector);
2452 2452
@@ -2559,7 +2559,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2559 return true; 2559 return true;
2560 2560
2561err: 2561err:
2562 drm_sysfs_connector_remove(connector); 2562 drm_connector_unregister(connector);
2563 intel_sdvo_destroy(connector); 2563 intel_sdvo_destroy(connector);
2564 return false; 2564 return false;
2565} 2565}
@@ -2638,7 +2638,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2638 return true; 2638 return true;
2639 2639
2640err: 2640err:
2641 drm_sysfs_connector_remove(connector); 2641 drm_connector_unregister(connector);
2642 intel_sdvo_destroy(connector); 2642 intel_sdvo_destroy(connector);
2643 return false; 2643 return false;
2644} 2644}
@@ -2711,7 +2711,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
2711 list_for_each_entry_safe(connector, tmp, 2711 list_for_each_entry_safe(connector, tmp,
2712 &dev->mode_config.connector_list, head) { 2712 &dev->mode_config.connector_list, head) {
2713 if (intel_attached_encoder(connector) == &intel_sdvo->base) { 2713 if (intel_attached_encoder(connector) == &intel_sdvo->base) {
2714 drm_sysfs_connector_remove(connector); 2714 drm_connector_unregister(connector);
2715 intel_sdvo_destroy(connector); 2715 intel_sdvo_destroy(connector);
2716 } 2716 }
2717 } 2717 }
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9a17b4e92ef4..168c6652cda1 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -218,7 +218,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
218 218
219 sprctl |= SP_ENABLE; 219 sprctl |= SP_ENABLE;
220 220
221 intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true, 221 intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
222 pixel_size, true,
222 src_w != crtc_w || src_h != crtc_h); 223 src_w != crtc_w || src_h != crtc_h);
223 224
224 /* Sizes are 0 based */ 225 /* Sizes are 0 based */
@@ -283,7 +284,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
283 if (atomic_update) 284 if (atomic_update)
284 intel_pipe_update_end(intel_crtc, start_vbl_count); 285 intel_pipe_update_end(intel_crtc, start_vbl_count);
285 286
286 intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false); 287 intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
287} 288}
288 289
289static int 290static int
@@ -406,7 +407,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
406 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 407 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
407 sprctl |= SPRITE_PIPE_CSC_ENABLE; 408 sprctl |= SPRITE_PIPE_CSC_ENABLE;
408 409
409 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, 410 intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
411 true,
410 src_w != crtc_w || src_h != crtc_h); 412 src_w != crtc_w || src_h != crtc_h);
411 413
412 /* Sizes are 0 based */ 414 /* Sizes are 0 based */
@@ -486,7 +488,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
486 */ 488 */
487 intel_wait_for_vblank(dev, pipe); 489 intel_wait_for_vblank(dev, pipe);
488 490
489 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); 491 intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
490} 492}
491 493
492static int 494static int
@@ -606,7 +608,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
606 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ 608 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
607 dvscntr |= DVS_ENABLE; 609 dvscntr |= DVS_ENABLE;
608 610
609 intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, 611 intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
612 pixel_size, true,
610 src_w != crtc_w || src_h != crtc_h); 613 src_w != crtc_w || src_h != crtc_h);
611 614
612 /* Sizes are 0 based */ 615 /* Sizes are 0 based */
@@ -681,7 +684,7 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
681 */ 684 */
682 intel_wait_for_vblank(dev, pipe); 685 intel_wait_for_vblank(dev, pipe);
683 686
684 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); 687 intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
685} 688}
686 689
687static void 690static void
@@ -819,6 +822,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
819 struct drm_device *dev = plane->dev; 822 struct drm_device *dev = plane->dev;
820 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 823 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
821 struct intel_plane *intel_plane = to_intel_plane(plane); 824 struct intel_plane *intel_plane = to_intel_plane(plane);
825 enum pipe pipe = intel_crtc->pipe;
822 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 826 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
823 struct drm_i915_gem_object *obj = intel_fb->obj; 827 struct drm_i915_gem_object *obj = intel_fb->obj;
824 struct drm_i915_gem_object *old_obj = intel_plane->obj; 828 struct drm_i915_gem_object *old_obj = intel_plane->obj;
@@ -1006,6 +1010,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
1006 */ 1010 */
1007 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 1011 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
1008 1012
1013 i915_gem_track_fb(old_obj, obj,
1014 INTEL_FRONTBUFFER_SPRITE(pipe));
1009 mutex_unlock(&dev->struct_mutex); 1015 mutex_unlock(&dev->struct_mutex);
1010 1016
1011 if (ret) 1017 if (ret)
@@ -1039,6 +1045,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
1039 else 1045 else
1040 intel_plane->disable_plane(plane, crtc); 1046 intel_plane->disable_plane(plane, crtc);
1041 1047
1048 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
1049
1042 if (!primary_was_enabled && primary_enabled) 1050 if (!primary_was_enabled && primary_enabled)
1043 intel_post_enable_primary(crtc); 1051 intel_post_enable_primary(crtc);
1044 } 1052 }
@@ -1068,6 +1076,7 @@ intel_disable_plane(struct drm_plane *plane)
1068 struct drm_device *dev = plane->dev; 1076 struct drm_device *dev = plane->dev;
1069 struct intel_plane *intel_plane = to_intel_plane(plane); 1077 struct intel_plane *intel_plane = to_intel_plane(plane);
1070 struct intel_crtc *intel_crtc; 1078 struct intel_crtc *intel_crtc;
1079 enum pipe pipe;
1071 1080
1072 if (!plane->fb) 1081 if (!plane->fb)
1073 return 0; 1082 return 0;
@@ -1076,6 +1085,7 @@ intel_disable_plane(struct drm_plane *plane)
1076 return -EINVAL; 1085 return -EINVAL;
1077 1086
1078 intel_crtc = to_intel_crtc(plane->crtc); 1087 intel_crtc = to_intel_crtc(plane->crtc);
1088 pipe = intel_crtc->pipe;
1079 1089
1080 if (intel_crtc->active) { 1090 if (intel_crtc->active) {
1081 bool primary_was_enabled = intel_crtc->primary_enabled; 1091 bool primary_was_enabled = intel_crtc->primary_enabled;
@@ -1094,6 +1104,8 @@ intel_disable_plane(struct drm_plane *plane)
1094 1104
1095 mutex_lock(&dev->struct_mutex); 1105 mutex_lock(&dev->struct_mutex);
1096 intel_unpin_fb_obj(intel_plane->obj); 1106 intel_unpin_fb_obj(intel_plane->obj);
1107 i915_gem_track_fb(intel_plane->obj, NULL,
1108 INTEL_FRONTBUFFER_SPRITE(pipe));
1097 mutex_unlock(&dev->struct_mutex); 1109 mutex_unlock(&dev->struct_mutex);
1098 1110
1099 intel_plane->obj = NULL; 1111 intel_plane->obj = NULL;
@@ -1114,7 +1126,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1114 struct drm_file *file_priv) 1126 struct drm_file *file_priv)
1115{ 1127{
1116 struct drm_intel_sprite_colorkey *set = data; 1128 struct drm_intel_sprite_colorkey *set = data;
1117 struct drm_mode_object *obj;
1118 struct drm_plane *plane; 1129 struct drm_plane *plane;
1119 struct intel_plane *intel_plane; 1130 struct intel_plane *intel_plane;
1120 int ret = 0; 1131 int ret = 0;
@@ -1128,13 +1139,12 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1128 1139
1129 drm_modeset_lock_all(dev); 1140 drm_modeset_lock_all(dev);
1130 1141
1131 obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); 1142 plane = drm_plane_find(dev, set->plane_id);
1132 if (!obj) { 1143 if (!plane) {
1133 ret = -ENOENT; 1144 ret = -ENOENT;
1134 goto out_unlock; 1145 goto out_unlock;
1135 } 1146 }
1136 1147
1137 plane = obj_to_plane(obj);
1138 intel_plane = to_intel_plane(plane); 1148 intel_plane = to_intel_plane(plane);
1139 ret = intel_plane->update_colorkey(plane, set); 1149 ret = intel_plane->update_colorkey(plane, set);
1140 1150
@@ -1147,7 +1157,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
1147 struct drm_file *file_priv) 1157 struct drm_file *file_priv)
1148{ 1158{
1149 struct drm_intel_sprite_colorkey *get = data; 1159 struct drm_intel_sprite_colorkey *get = data;
1150 struct drm_mode_object *obj;
1151 struct drm_plane *plane; 1160 struct drm_plane *plane;
1152 struct intel_plane *intel_plane; 1161 struct intel_plane *intel_plane;
1153 int ret = 0; 1162 int ret = 0;
@@ -1157,13 +1166,12 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
1157 1166
1158 drm_modeset_lock_all(dev); 1167 drm_modeset_lock_all(dev);
1159 1168
1160 obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); 1169 plane = drm_plane_find(dev, get->plane_id);
1161 if (!obj) { 1170 if (!plane) {
1162 ret = -ENOENT; 1171 ret = -ENOENT;
1163 goto out_unlock; 1172 goto out_unlock;
1164 } 1173 }
1165 1174
1166 plane = obj_to_plane(obj);
1167 intel_plane = to_intel_plane(plane); 1175 intel_plane = to_intel_plane(plane);
1168 intel_plane->get_colorkey(plane, get); 1176 intel_plane->get_colorkey(plane, get);
1169 1177
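[ Ed. drm_plane_find() is a thin convenience over the two-step lookup
being deleted in these hunks; roughly (a sketch of the helper's shape,
not its exact upstream body): ]

	static inline struct drm_plane *drm_plane_find_sketch(struct drm_device *dev,
							      uint32_t id)
	{
		struct drm_mode_object *mo;

		mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
		return mo ? obj_to_plane(mo) : NULL;
	}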
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 67c6c9a2eb1c..e211eef4b7e4 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1680,5 +1680,5 @@ intel_tv_init(struct drm_device *dev)
1680 drm_object_attach_property(&connector->base, 1680 drm_object_attach_property(&connector->base,
1681 dev->mode_config.tv_bottom_margin_property, 1681 dev->mode_config.tv_bottom_margin_property,
1682 intel_tv->margin[TV_MARGIN_BOTTOM]); 1682 intel_tv->margin[TV_MARGIN_BOTTOM]);
1683 drm_sysfs_connector_add(connector); 1683 drm_connector_register(connector);
1684} 1684}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f6fef7ac069..e81bc3bdc533 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,8 +231,8 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
231 } 231 }
232 232
233 /* WaRsForcewakeWaitTC0:vlv */ 233 /* WaRsForcewakeWaitTC0:vlv */
234 __gen6_gt_wait_for_thread_c0(dev_priv); 234 if (!IS_CHERRYVIEW(dev_priv->dev))
235 235 __gen6_gt_wait_for_thread_c0(dev_priv);
236} 236}
237 237
238static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, 238static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -250,9 +250,10 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
250 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, 250 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
251 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 251 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
252 252
253 /* The below doubles as a POSTING_READ */ 253 /* something from same cacheline, but !FORCEWAKE_VLV */
254 gen6_gt_check_fifodbg(dev_priv); 254 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
255 255 if (!IS_CHERRYVIEW(dev_priv->dev))
256 gen6_gt_check_fifodbg(dev_priv);
256} 257}
257 258
258static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) 259static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
@@ -315,7 +316,7 @@ static void gen6_force_wake_timer(unsigned long arg)
315 intel_runtime_pm_put(dev_priv); 316 intel_runtime_pm_put(dev_priv);
316} 317}
317 318
318static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) 319void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
319{ 320{
320 struct drm_i915_private *dev_priv = dev->dev_private; 321 struct drm_i915_private *dev_priv = dev->dev_private;
321 unsigned long irqflags; 322 unsigned long irqflags;
@@ -357,16 +358,12 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
357 dev_priv->uncore.fifo_count = 358 dev_priv->uncore.fifo_count =
358 __raw_i915_read32(dev_priv, GTFIFOCTL) & 359 __raw_i915_read32(dev_priv, GTFIFOCTL) &
359 GT_FIFO_FREE_ENTRIES_MASK; 360 GT_FIFO_FREE_ENTRIES_MASK;
360 } else {
361 dev_priv->uncore.forcewake_count = 0;
362 dev_priv->uncore.fw_rendercount = 0;
363 dev_priv->uncore.fw_mediacount = 0;
364 } 361 }
365 362
366 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 363 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
367} 364}
368 365
369void intel_uncore_early_sanitize(struct drm_device *dev) 366void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
370{ 367{
371 struct drm_i915_private *dev_priv = dev->dev_private; 368 struct drm_i915_private *dev_priv = dev->dev_private;
372 369
@@ -389,7 +386,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
389 __raw_i915_write32(dev_priv, GTFIFODBG, 386 __raw_i915_write32(dev_priv, GTFIFODBG,
390 __raw_i915_read32(dev_priv, GTFIFODBG)); 387 __raw_i915_read32(dev_priv, GTFIFODBG));
391 388
392 intel_uncore_forcewake_reset(dev, false); 389 intel_uncore_forcewake_reset(dev, restore_forcewake);
393} 390}
394 391
395void intel_uncore_sanitize(struct drm_device *dev) 392void intel_uncore_sanitize(struct drm_device *dev)
@@ -469,16 +466,43 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
469#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 466#define NEEDS_FORCE_WAKE(dev_priv, reg) \
470 ((reg) < 0x40000 && (reg) != FORCEWAKE) 467 ((reg) < 0x40000 && (reg) != FORCEWAKE)
471 468
472#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ 469#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
473 (((reg) >= 0x2000 && (reg) < 0x4000) ||\
474 ((reg) >= 0x5000 && (reg) < 0x8000) ||\
475 ((reg) >= 0xB000 && (reg) < 0x12000) ||\
476 ((reg) >= 0x2E000 && (reg) < 0x30000))
477 470
478#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\ 471#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
479 (((reg) >= 0x12000 && (reg) < 0x14000) ||\ 472 (REG_RANGE((reg), 0x2000, 0x4000) || \
480 ((reg) >= 0x22000 && (reg) < 0x24000) ||\ 473 REG_RANGE((reg), 0x5000, 0x8000) || \
481 ((reg) >= 0x30000 && (reg) < 0x40000)) 474 REG_RANGE((reg), 0xB000, 0x12000) || \
475 REG_RANGE((reg), 0x2E000, 0x30000))
476
477#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
478 (REG_RANGE((reg), 0x12000, 0x14000) || \
479 REG_RANGE((reg), 0x22000, 0x24000) || \
480 REG_RANGE((reg), 0x30000, 0x40000))
481
482#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
483 (REG_RANGE((reg), 0x2000, 0x4000) || \
484 REG_RANGE((reg), 0x5000, 0x8000) || \
485 REG_RANGE((reg), 0x8300, 0x8500) || \
486 REG_RANGE((reg), 0xB000, 0xC000) || \
487 REG_RANGE((reg), 0xE000, 0xE800))
488
489#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
490 (REG_RANGE((reg), 0x8800, 0x8900) || \
491 REG_RANGE((reg), 0xD000, 0xD800) || \
492 REG_RANGE((reg), 0x12000, 0x14000) || \
493 REG_RANGE((reg), 0x1A000, 0x1C000) || \
494 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
495 REG_RANGE((reg), 0x30000, 0x40000))
496
497#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
498 (REG_RANGE((reg), 0x4000, 0x5000) || \
499 REG_RANGE((reg), 0x8000, 0x8300) || \
500 REG_RANGE((reg), 0x8500, 0x8600) || \
501 REG_RANGE((reg), 0x9000, 0xB000) || \
502 REG_RANGE((reg), 0xC000, 0xC800) || \
503 REG_RANGE((reg), 0xF000, 0x10000) || \
504 REG_RANGE((reg), 0x14000, 0x14400) || \
505 REG_RANGE((reg), 0x22000, 0x24000))
482 506
483static void 507static void
484ilk_dummy_write(struct drm_i915_private *dev_priv) 508ilk_dummy_write(struct drm_i915_private *dev_priv)
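
[ Ed. note: the new REG_RANGE() macro collapses the open-coded comparisons, and the three CHV range sets map a register offset to the force-wake domain(s) it needs. A self-contained sketch of that classification, using the ranges from the hunk above; the FW_RENDER/FW_MEDIA flag names are illustrative, not the i915 ones. ]

#include <stdint.h>
#include <stdio.h>

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FW_RENDER (1u << 0)
#define FW_MEDIA  (1u << 1)

static unsigned chv_fw_domains(uint32_t reg)
{
	if (REG_RANGE(reg, 0x2000, 0x4000) ||
	    REG_RANGE(reg, 0x5000, 0x8000) ||
	    REG_RANGE(reg, 0x8300, 0x8500) ||
	    REG_RANGE(reg, 0xB000, 0xC000) ||
	    REG_RANGE(reg, 0xE000, 0xE800))
		return FW_RENDER;
	if (REG_RANGE(reg, 0x8800, 0x8900) ||
	    REG_RANGE(reg, 0xD000, 0xD800) ||
	    REG_RANGE(reg, 0x12000, 0x14000) ||
	    REG_RANGE(reg, 0x1A000, 0x1C000) ||
	    REG_RANGE(reg, 0x1E800, 0x1EA00) ||
	    REG_RANGE(reg, 0x30000, 0x40000))
		return FW_MEDIA;
	if (REG_RANGE(reg, 0x4000, 0x5000) ||
	    REG_RANGE(reg, 0x8000, 0x8300) ||
	    REG_RANGE(reg, 0x8500, 0x8600) ||
	    REG_RANGE(reg, 0x9000, 0xB000) ||
	    REG_RANGE(reg, 0xC000, 0xC800) ||
	    REG_RANGE(reg, 0xF000, 0x10000) ||
	    REG_RANGE(reg, 0x14000, 0x14400) ||
	    REG_RANGE(reg, 0x22000, 0x24000))
		return FW_RENDER | FW_MEDIA;   /* "common" ranges need both wells */
	return 0;
}

int main(void)
{
	printf("0x2100 needs 0x%x\n", chv_fw_domains(0x2100));  /* render */
	printf("0x4100 needs 0x%x\n", chv_fw_domains(0x4100));  /* render|media */
	return 0;
}
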
@@ -490,20 +514,30 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
490} 514}
491 515
492static void 516static void
493hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) 517hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
518 bool before)
494{ 519{
520 const char *op = read ? "reading" : "writing to";
521 const char *when = before ? "before" : "after";
522
523 if (!i915.mmio_debug)
524 return;
525
495 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { 526 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
496 DRM_ERROR("Unknown unclaimed register before writing to %x\n", 527 WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
497 reg); 528 when, op, reg);
498 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 529 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
499 } 530 }
500} 531}
501 532
502static void 533static void
503hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) 534hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
504{ 535{
536 if (i915.mmio_debug)
537 return;
538
505 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { 539 if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
506 DRM_ERROR("Unclaimed write to %x\n", reg); 540 DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
507 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 541 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
508 } 542 }
509} 543}
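
[ Ed. note: the old clear/check pair becomes a two-tier scheme. hsw_unclaimed_reg_debug() does per-access WARNs, before and after every MMIO access, but only when the new i915.mmio_debug module parameter is set; hsw_unclaimed_reg_detect() is the cheap fallback that fires when it is *not* set and just points the user at the parameter. A sketch, stubs and all: ]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mmio_debug;               /* stands in for i915.mmio_debug */

static bool unclaimed_bit_set(void)   { return false; }  /* stub for the FPGA_DBG read */
static void clear_unclaimed_bit(void) { }

/* Expensive, precise: runs around every access and names the register. */
static void unclaimed_reg_debug_sketch(uint32_t reg, bool read, bool before)
{
	if (!mmio_debug)
		return;
	if (unclaimed_bit_set()) {
		fprintf(stderr, "unclaimed register %s %s 0x%x\n",
			before ? "before" : "after",
			read ? "reading" : "writing to", reg);
		clear_unclaimed_bit();
	}
}

/* Cheap, coarse: runs once per write when debugging is off, and just
 * tells the user how to turn the precise checks on. */
static void unclaimed_reg_detect_sketch(void)
{
	if (mmio_debug)
		return;
	if (unclaimed_bit_set()) {
		fprintf(stderr, "unclaimed register; set mmio_debug=1 for details\n");
		clear_unclaimed_bit();
	}
}
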
@@ -540,6 +574,7 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
540static u##x \ 574static u##x \
541gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ 575gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
542 REG_READ_HEADER(x); \ 576 REG_READ_HEADER(x); \
577 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
543 if (dev_priv->uncore.forcewake_count == 0 && \ 578 if (dev_priv->uncore.forcewake_count == 0 && \
544 NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 579 NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
545 dev_priv->uncore.funcs.force_wake_get(dev_priv, \ 580 dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -550,6 +585,7 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
550 } else { \ 585 } else { \
551 val = __raw_i915_read##x(dev_priv, reg); \ 586 val = __raw_i915_read##x(dev_priv, reg); \
552 } \ 587 } \
588 hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
553 REG_READ_FOOTER; \ 589 REG_READ_FOOTER; \
554} 590}
555 591
@@ -573,7 +609,35 @@ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
573 REG_READ_FOOTER; \ 609 REG_READ_FOOTER; \
574} 610}
575 611
612#define __chv_read(x) \
613static u##x \
614chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
615 unsigned fwengine = 0; \
616 REG_READ_HEADER(x); \
617 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
618 if (dev_priv->uncore.fw_rendercount == 0) \
619 fwengine = FORCEWAKE_RENDER; \
620 } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
621 if (dev_priv->uncore.fw_mediacount == 0) \
622 fwengine = FORCEWAKE_MEDIA; \
623 } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
624 if (dev_priv->uncore.fw_rendercount == 0) \
625 fwengine |= FORCEWAKE_RENDER; \
626 if (dev_priv->uncore.fw_mediacount == 0) \
627 fwengine |= FORCEWAKE_MEDIA; \
628 } \
629 if (fwengine) \
630 dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
631 val = __raw_i915_read##x(dev_priv, reg); \
632 if (fwengine) \
633 dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
634 REG_READ_FOOTER; \
635}
576 636
637__chv_read(8)
638__chv_read(16)
639__chv_read(32)
640__chv_read(64)
577__vlv_read(8) 641__vlv_read(8)
578__vlv_read(16) 642__vlv_read(16)
579__vlv_read(32) 643__vlv_read(32)
@@ -591,6 +655,7 @@ __gen4_read(16)
591__gen4_read(32) 655__gen4_read(32)
592__gen4_read(64) 656__gen4_read(64)
593 657
658#undef __chv_read
594#undef __vlv_read 659#undef __vlv_read
595#undef __gen6_read 660#undef __gen6_read
596#undef __gen5_read 661#undef __gen5_read
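
[ Ed. note: the generated chv_read*() accessors take force-wake only for the domain(s) a register's range needs, and only when that domain's refcount is currently zero, then drop it again after the read. A sketch, reusing FW_RENDER/FW_MEDIA and chv_fw_domains() from the range-classification note above: ]

#include <stdint.h>

struct chv_sketch {
	int fw_rendercount, fw_mediacount;
	void (*force_wake_get)(struct chv_sketch *, unsigned);
	void (*force_wake_put)(struct chv_sketch *, unsigned);
	uint32_t (*raw_read)(struct chv_sketch *, uint32_t);
};

static uint32_t chv_read_sketch(struct chv_sketch *d, uint32_t reg)
{
	unsigned need = chv_fw_domains(reg), fw = 0;
	uint32_t val;

	/* Only wake a domain whose refcount is zero; a non-zero count
	 * means someone else is already holding it awake. */
	if ((need & FW_RENDER) && d->fw_rendercount == 0)
		fw |= FW_RENDER;
	if ((need & FW_MEDIA) && d->fw_mediacount == 0)
		fw |= FW_MEDIA;

	if (fw)
		d->force_wake_get(d, fw);
	val = d->raw_read(d, reg);
	if (fw)
		d->force_wake_put(d, fw);
	return val;
}
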
@@ -647,12 +712,13 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
647 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 712 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
648 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 713 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
649 } \ 714 } \
650 hsw_unclaimed_reg_clear(dev_priv, reg); \ 715 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
651 __raw_i915_write##x(dev_priv, reg, val); \ 716 __raw_i915_write##x(dev_priv, reg, val); \
652 if (unlikely(__fifo_ret)) { \ 717 if (unlikely(__fifo_ret)) { \
653 gen6_gt_check_fifodbg(dev_priv); \ 718 gen6_gt_check_fifodbg(dev_priv); \
654 } \ 719 } \
655 hsw_unclaimed_reg_check(dev_priv, reg); \ 720 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
721 hsw_unclaimed_reg_detect(dev_priv); \
656 REG_WRITE_FOOTER; \ 722 REG_WRITE_FOOTER; \
657} 723}
658 724
@@ -681,6 +747,7 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
681static void \ 747static void \
682gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ 748gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
683 REG_WRITE_HEADER; \ 749 REG_WRITE_HEADER; \
750 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
684 if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \ 751 if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
685 if (dev_priv->uncore.forcewake_count == 0) \ 752 if (dev_priv->uncore.forcewake_count == 0) \
686 dev_priv->uncore.funcs.force_wake_get(dev_priv, \ 753 dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -692,9 +759,43 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
692 } else { \ 759 } else { \
693 __raw_i915_write##x(dev_priv, reg, val); \ 760 __raw_i915_write##x(dev_priv, reg, val); \
694 } \ 761 } \
762 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
763 hsw_unclaimed_reg_detect(dev_priv); \
695 REG_WRITE_FOOTER; \ 764 REG_WRITE_FOOTER; \
696} 765}
697 766
767#define __chv_write(x) \
768static void \
769chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
770 unsigned fwengine = 0; \
771 bool shadowed = is_gen8_shadowed(dev_priv, reg); \
772 REG_WRITE_HEADER; \
773 if (!shadowed) { \
774 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
775 if (dev_priv->uncore.fw_rendercount == 0) \
776 fwengine = FORCEWAKE_RENDER; \
777 } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
778 if (dev_priv->uncore.fw_mediacount == 0) \
779 fwengine = FORCEWAKE_MEDIA; \
780 } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
781 if (dev_priv->uncore.fw_rendercount == 0) \
782 fwengine |= FORCEWAKE_RENDER; \
783 if (dev_priv->uncore.fw_mediacount == 0) \
784 fwengine |= FORCEWAKE_MEDIA; \
785 } \
786 } \
787 if (fwengine) \
788 dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
789 __raw_i915_write##x(dev_priv, reg, val); \
790 if (fwengine) \
791 dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
792 REG_WRITE_FOOTER; \
793}
794
795__chv_write(8)
796__chv_write(16)
797__chv_write(32)
798__chv_write(64)
698__gen8_write(8) 799__gen8_write(8)
699__gen8_write(16) 800__gen8_write(16)
700__gen8_write(32) 801__gen8_write(32)
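
[ Ed. note: the write side is the same dance with one extra gate — gen8 "shadowed" registers can be written without force-wake, so the domain lookup is skipped for them entirely. Sketch, assuming the struct and flags from the read sketch above and a hypothetical is_shadowed_sketch() in place of is_gen8_shadowed(): ]

#include <stdbool.h>
#include <stdint.h>

static bool is_shadowed_sketch(uint32_t reg) { (void)reg; return false; }

static void chv_write_sketch(struct chv_sketch *d, uint32_t reg, uint32_t val,
			     void (*raw_write)(struct chv_sketch *, uint32_t, uint32_t))
{
	unsigned fw = 0;

	if (!is_shadowed_sketch(reg)) {          /* shadowed regs need no wake */
		unsigned need = chv_fw_domains(reg);
		if ((need & FW_RENDER) && d->fw_rendercount == 0)
			fw |= FW_RENDER;
		if ((need & FW_MEDIA) && d->fw_mediacount == 0)
			fw |= FW_MEDIA;
	}
	if (fw)
		d->force_wake_get(d, fw);
	raw_write(d, reg, val);
	if (fw)
		d->force_wake_put(d, fw);
}
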
@@ -716,6 +817,7 @@ __gen4_write(16)
716__gen4_write(32) 817__gen4_write(32)
717__gen4_write(64) 818__gen4_write(64)
718 819
820#undef __chv_write
719#undef __gen8_write 821#undef __gen8_write
720#undef __hsw_write 822#undef __hsw_write
721#undef __gen6_write 823#undef __gen6_write
@@ -731,7 +833,7 @@ void intel_uncore_init(struct drm_device *dev)
731 setup_timer(&dev_priv->uncore.force_wake_timer, 833 setup_timer(&dev_priv->uncore.force_wake_timer,
732 gen6_force_wake_timer, (unsigned long)dev_priv); 834 gen6_force_wake_timer, (unsigned long)dev_priv);
733 835
734 intel_uncore_early_sanitize(dev); 836 intel_uncore_early_sanitize(dev, false);
735 837
736 if (IS_VALLEYVIEW(dev)) { 838 if (IS_VALLEYVIEW(dev)) {
737 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; 839 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
@@ -779,14 +881,26 @@ void intel_uncore_init(struct drm_device *dev)
779 881
780 switch (INTEL_INFO(dev)->gen) { 882 switch (INTEL_INFO(dev)->gen) {
781 default: 883 default:
782 dev_priv->uncore.funcs.mmio_writeb = gen8_write8; 884 if (IS_CHERRYVIEW(dev)) {
783 dev_priv->uncore.funcs.mmio_writew = gen8_write16; 885 dev_priv->uncore.funcs.mmio_writeb = chv_write8;
784 dev_priv->uncore.funcs.mmio_writel = gen8_write32; 886 dev_priv->uncore.funcs.mmio_writew = chv_write16;
785 dev_priv->uncore.funcs.mmio_writeq = gen8_write64; 887 dev_priv->uncore.funcs.mmio_writel = chv_write32;
786 dev_priv->uncore.funcs.mmio_readb = gen6_read8; 888 dev_priv->uncore.funcs.mmio_writeq = chv_write64;
787 dev_priv->uncore.funcs.mmio_readw = gen6_read16; 889 dev_priv->uncore.funcs.mmio_readb = chv_read8;
788 dev_priv->uncore.funcs.mmio_readl = gen6_read32; 890 dev_priv->uncore.funcs.mmio_readw = chv_read16;
789 dev_priv->uncore.funcs.mmio_readq = gen6_read64; 891 dev_priv->uncore.funcs.mmio_readl = chv_read32;
892 dev_priv->uncore.funcs.mmio_readq = chv_read64;
893
894 } else {
895 dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
896 dev_priv->uncore.funcs.mmio_writew = gen8_write16;
897 dev_priv->uncore.funcs.mmio_writel = gen8_write32;
898 dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
899 dev_priv->uncore.funcs.mmio_readb = gen6_read8;
900 dev_priv->uncore.funcs.mmio_readw = gen6_read16;
901 dev_priv->uncore.funcs.mmio_readl = gen6_read32;
902 dev_priv->uncore.funcs.mmio_readq = gen6_read64;
903 }
790 break; 904 break;
791 case 7: 905 case 7:
792 case 6: 906 case 6:
@@ -912,7 +1026,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
912 if (args->flags || args->pad) 1026 if (args->flags || args->pad)
913 return -EINVAL; 1027 return -EINVAL;
914 1028
915 if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN)) 1029 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
916 return -EPERM; 1030 return -EPERM;
917 1031
918 ret = mutex_lock_interruptible(&dev->struct_mutex); 1032 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1053,18 +1167,16 @@ static int gen6_do_reset(struct drm_device *dev)
1053 1167
1054int intel_gpu_reset(struct drm_device *dev) 1168int intel_gpu_reset(struct drm_device *dev)
1055{ 1169{
1056 switch (INTEL_INFO(dev)->gen) { 1170 if (INTEL_INFO(dev)->gen >= 6)
1057 case 8: 1171 return gen6_do_reset(dev);
1058 case 7: 1172 else if (IS_GEN5(dev))
1059 case 6: return gen6_do_reset(dev); 1173 return ironlake_do_reset(dev);
1060 case 5: return ironlake_do_reset(dev); 1174 else if (IS_G4X(dev))
1061 case 4: 1175 return g4x_do_reset(dev);
1062 if (IS_G4X(dev)) 1176 else if (IS_GEN4(dev))
1063 return g4x_do_reset(dev); 1177 return i965_do_reset(dev);
1064 else 1178 else
1065 return i965_do_reset(dev); 1179 return -ENODEV;
1066 default: return -ENODEV;
1067 }
1068} 1180}
1069 1181
1070void intel_uncore_check_errors(struct drm_device *dev) 1182void intel_uncore_check_errors(struct drm_device *dev)
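
[ Ed. note: the switch over exact GEN numbers becomes an ordered if/else chain, so any future gen >= 6 (gen8 included) falls into gen6_do_reset() by default instead of needing a new case label. Sketch of the dispatch shape with stubbed reset handlers: ]

#include <errno.h>
#include <stdbool.h>

struct gpu_sketch { int gen; bool is_g4x; };

static int gen6_reset_sketch(void)     { return 0; }
static int ironlake_reset_sketch(void) { return 0; }
static int g4x_reset_sketch(void)      { return 0; }
static int i965_reset_sketch(void)     { return 0; }

static int gpu_reset_sketch(const struct gpu_sketch *d)
{
	if (d->gen >= 6)          /* catches gen6/7/8 and whatever comes next */
		return gen6_reset_sketch();
	else if (d->gen == 5)
		return ironlake_reset_sketch();
	else if (d->is_g4x)       /* a gen4 variant with its own reset path */
		return g4x_reset_sketch();
	else if (d->gen == 4)
		return i965_reset_sketch();
	else
		return -ENODEV;
}
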
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index cf11ee68a6d9..80de23d9b9c9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -280,7 +280,7 @@ static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
280{ 280{
281 int ret; 281 int ret;
282 282
283 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); 283 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
284 if (ret) { 284 if (ret) {
285 if (ret != -ERESTARTSYS && ret != -EBUSY) 285 if (ret != -ERESTARTSYS && ret != -EBUSY)
286 DRM_ERROR("reserve failed %p\n", bo); 286 DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 13b7dd83faa9..5451dc58eff1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -272,7 +272,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
272 return 0; 272 return 0;
273} 273}
274 274
275static struct drm_fb_helper_funcs mga_fb_helper_funcs = { 275static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
276 .gamma_set = mga_crtc_fb_gamma_set, 276 .gamma_set = mga_crtc_fb_gamma_set,
277 .gamma_get = mga_crtc_fb_gamma_get, 277 .gamma_get = mga_crtc_fb_gamma_get,
278 .fb_probe = mgag200fb_create, 278 .fb_probe = mgag200fb_create,
@@ -293,9 +293,10 @@ int mgag200_fbdev_init(struct mga_device *mdev)
293 return -ENOMEM; 293 return -ENOMEM;
294 294
295 mdev->mfbdev = mfbdev; 295 mdev->mfbdev = mfbdev;
296 mfbdev->helper.funcs = &mga_fb_helper_funcs;
297 spin_lock_init(&mfbdev->dirty_lock); 296 spin_lock_init(&mfbdev->dirty_lock);
298 297
298 drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
299
299 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, 300 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
300 mdev->num_crtc, MGAG200FB_CONN_LIMIT); 301 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
301 if (ret) 302 if (ret)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a034ed408252..45f04dea0ac2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1562,19 +1562,9 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
1562 *connector) 1562 *connector)
1563{ 1563{
1564 int enc_id = connector->encoder_ids[0]; 1564 int enc_id = connector->encoder_ids[0];
1565 struct drm_mode_object *obj;
1566 struct drm_encoder *encoder;
1567
1568 /* pick the encoder ids */ 1565 /* pick the encoder ids */
1569 if (enc_id) { 1566 if (enc_id)
1570 obj = 1567 return drm_encoder_find(connector->dev, enc_id);
1571 drm_mode_object_find(connector->dev, enc_id,
1572 DRM_MODE_OBJECT_ENCODER);
1573 if (!obj)
1574 return NULL;
1575 encoder = obj_to_encoder(obj);
1576 return encoder;
1577 }
1578 return NULL; 1568 return NULL;
1579} 1569}
1580 1570
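
[ Ed. note: the open-coded drm_mode_object_find() + obj_to_encoder() pair collapses into drm_encoder_find(), which does the lookup, the upcast, and the NULL handling in one call. The whole function reduces to roughly this shape; the types and the find helper below are stand-ins for illustration, not the DRM API itself. ]

struct encoder_sketch { int id; };
struct drm_dev_sketch { struct encoder_sketch enc; };

/* Stub standing in for drm_encoder_find(): NULL when the id doesn't resolve. */
static struct encoder_sketch *encoder_find_sketch(struct drm_dev_sketch *dev, int id)
{
	return (id == dev->enc.id) ? &dev->enc : (struct encoder_sketch *)0;
}

static struct encoder_sketch *best_encoder_sketch(struct drm_dev_sketch *dev, int enc_id)
{
	return enc_id ? encoder_find_sketch(dev, enc_id) : (struct encoder_sketch *)0;
}
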
@@ -1621,7 +1611,7 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
1621 1611
1622 drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); 1612 drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
1623 1613
1624 drm_sysfs_connector_add(connector); 1614 drm_connector_register(connector);
1625 1615
1626 mga_connector->i2c = mgag200_i2c_create(dev); 1616 mga_connector->i2c = mgag200_i2c_create(dev);
1627 if (!mga_connector->i2c) 1617 if (!mga_connector->i2c)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f12388967856..c99c50de3226 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,6 @@
2config DRM_MSM 2config DRM_MSM
3 tristate "MSM DRM" 3 tristate "MSM DRM"
4 depends on DRM 4 depends on DRM
5 depends on MSM_IOMMU
6 depends on ARCH_QCOM || (ARM && COMPILE_TEST) 5 depends on ARCH_QCOM || (ARM && COMPILE_TEST)
7 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
8 select SHMEM 7 select SHMEM
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 85d615e7d62f..a8a144b38eaa 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
18 18
19Copyright (C) 2013 by the following authors: 19Copyright (C) 2013-2014 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
21 21
22Permission is hereby granted, free of charge, to any person obtaining 22Permission is hereby granted, free of charge, to any person obtaining
@@ -203,6 +203,15 @@ enum a2xx_rb_copy_sample_select {
203 SAMPLE_0123 = 6, 203 SAMPLE_0123 = 6,
204}; 204};
205 205
206enum a2xx_rb_blend_opcode {
207 BLEND_DST_PLUS_SRC = 0,
208 BLEND_SRC_MINUS_DST = 1,
209 BLEND_MIN_DST_SRC = 2,
210 BLEND_MAX_DST_SRC = 3,
211 BLEND_DST_MINUS_SRC = 4,
212 BLEND_DST_PLUS_SRC_BIAS = 5,
213};
214
206enum adreno_mmu_clnt_beh { 215enum adreno_mmu_clnt_beh {
207 BEH_NEVR = 0, 216 BEH_NEVR = 0,
208 BEH_TRAN_RNG = 1, 217 BEH_TRAN_RNG = 1,
@@ -890,6 +899,39 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
890#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9 899#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
891 900
892#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc 901#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
902#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
903#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
904static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
905{
906 return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
907}
908#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
909#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
910static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
911{
912 return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
913}
914#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
915#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
916static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
917{
918 return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
919}
920#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
921#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
922static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
923{
924 return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
925}
926#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
927#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
928#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
929#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
930#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
931static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
932{
933 return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
934}
893 935
894#define REG_A2XX_VGT_IMMED_DATA 0x000021fd 936#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
895 937
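
[ Ed. note: every generated A2XX_VGT_DRAW_INITIATOR_*() helper above is the same shift-and-mask encode; the mask clamps the value to the field width so adjacent fields can be OR'd together safely. The pattern in isolation, with a made-up two-field register: ]

#include <assert.h>
#include <stdint.h>

/* Hypothetical layout: PRIM in bits 0..5, COUNT in bits 16..31. */
#define PRIM__MASK   0x0000003fu
#define PRIM__SHIFT  0
#define COUNT__MASK  0xffff0000u
#define COUNT__SHIFT 16

static inline uint32_t PRIM(uint32_t v)  { return (v << PRIM__SHIFT)  & PRIM__MASK;  }
static inline uint32_t COUNT(uint32_t v) { return (v << COUNT__SHIFT) & COUNT__MASK; }

int main(void)
{
	uint32_t reg = PRIM(4) | COUNT(100);   /* fields compose with | */
	assert(reg == 0x00640004);
	return 0;
}
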
@@ -963,7 +1005,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend
963} 1005}
964#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0 1006#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
965#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5 1007#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
966static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val) 1008static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val)
967{ 1009{
968 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK; 1010 return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
969} 1011}
@@ -981,7 +1023,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend
981} 1023}
982#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000 1024#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
983#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21 1025#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
984static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val) 1026static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val)
985{ 1027{
986 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK; 1028 return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
987} 1029}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index a7be56163d23..303e8a9e91a5 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
18 18
19Copyright (C) 2013 by the following authors: 19Copyright (C) 2013-2014 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
21 21
22Permission is hereby granted, free of charge, to any person obtaining 22Permission is hereby granted, free of charge, to any person obtaining
@@ -41,31 +41,11 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41*/ 41*/
42 42
43 43
44enum a3xx_render_mode {
45 RB_RENDERING_PASS = 0,
46 RB_TILING_PASS = 1,
47 RB_RESOLVE_PASS = 2,
48};
49
50enum a3xx_tile_mode { 44enum a3xx_tile_mode {
51 LINEAR = 0, 45 LINEAR = 0,
52 TILE_32X32 = 2, 46 TILE_32X32 = 2,
53}; 47};
54 48
55enum a3xx_threadmode {
56 MULTI = 0,
57 SINGLE = 1,
58};
59
60enum a3xx_instrbuffermode {
61 BUFFER = 1,
62};
63
64enum a3xx_threadsize {
65 TWO_QUADS = 0,
66 FOUR_QUADS = 1,
67};
68
69enum a3xx_state_block_id { 49enum a3xx_state_block_id {
70 HLSQ_BLOCK_ID_TP_TEX = 2, 50 HLSQ_BLOCK_ID_TP_TEX = 2,
71 HLSQ_BLOCK_ID_TP_MIPMAP = 3, 51 HLSQ_BLOCK_ID_TP_MIPMAP = 3,
@@ -169,6 +149,8 @@ enum a3xx_color_fmt {
169 RB_R8G8B8A8_UNORM = 8, 149 RB_R8G8B8A8_UNORM = 8,
170 RB_Z16_UNORM = 12, 150 RB_Z16_UNORM = 12,
171 RB_A8_UNORM = 20, 151 RB_A8_UNORM = 20,
152 RB_R16G16B16A16_FLOAT = 27,
153 RB_R32G32B32A32_FLOAT = 51,
172}; 154};
173 155
174enum a3xx_color_swap { 156enum a3xx_color_swap {
@@ -178,12 +160,6 @@ enum a3xx_color_swap {
178 XYZW = 3, 160 XYZW = 3,
179}; 161};
180 162
181enum a3xx_msaa_samples {
182 MSAA_ONE = 0,
183 MSAA_TWO = 1,
184 MSAA_FOUR = 2,
185};
186
187enum a3xx_sp_perfcounter_select { 163enum a3xx_sp_perfcounter_select {
188 SP_FS_CFLOW_INSTRUCTIONS = 12, 164 SP_FS_CFLOW_INSTRUCTIONS = 12,
189 SP_FS_FULL_ALU_INSTRUCTIONS = 14, 165 SP_FS_FULL_ALU_INSTRUCTIONS = 14,
@@ -191,21 +167,45 @@ enum a3xx_sp_perfcounter_select {
191 SP_ALU_ACTIVE_CYCLES = 29, 167 SP_ALU_ACTIVE_CYCLES = 29,
192}; 168};
193 169
194enum adreno_rb_copy_control_mode { 170enum a3xx_rop_code {
195 RB_COPY_RESOLVE = 1, 171 ROP_CLEAR = 0,
196 RB_COPY_DEPTH_STENCIL = 5, 172 ROP_NOR = 1,
173 ROP_AND_INVERTED = 2,
174 ROP_COPY_INVERTED = 3,
175 ROP_AND_REVERSE = 4,
176 ROP_INVERT = 5,
177 ROP_XOR = 6,
178 ROP_NAND = 7,
179 ROP_AND = 8,
180 ROP_EQUIV = 9,
181 ROP_NOOP = 10,
182 ROP_OR_INVERTED = 11,
183 ROP_COPY = 12,
184 ROP_OR_REVERSE = 13,
185 ROP_OR = 14,
186 ROP_SET = 15,
187};
188
189enum a3xx_rb_blend_opcode {
190 BLEND_DST_PLUS_SRC = 0,
191 BLEND_SRC_MINUS_DST = 1,
192 BLEND_DST_MINUS_SRC = 2,
193 BLEND_MIN_DST_SRC = 3,
194 BLEND_MAX_DST_SRC = 4,
197}; 195};
198 196
199enum a3xx_tex_filter { 197enum a3xx_tex_filter {
200 A3XX_TEX_NEAREST = 0, 198 A3XX_TEX_NEAREST = 0,
201 A3XX_TEX_LINEAR = 1, 199 A3XX_TEX_LINEAR = 1,
200 A3XX_TEX_ANISO = 2,
202}; 201};
203 202
204enum a3xx_tex_clamp { 203enum a3xx_tex_clamp {
205 A3XX_TEX_REPEAT = 0, 204 A3XX_TEX_REPEAT = 0,
206 A3XX_TEX_CLAMP_TO_EDGE = 1, 205 A3XX_TEX_CLAMP_TO_EDGE = 1,
207 A3XX_TEX_MIRROR_REPEAT = 2, 206 A3XX_TEX_MIRROR_REPEAT = 2,
208 A3XX_TEX_CLAMP_NONE = 3, 207 A3XX_TEX_CLAMP_TO_BORDER = 3,
208 A3XX_TEX_MIRROR_CLAMP = 4,
209}; 209};
210 210
211enum a3xx_tex_swiz { 211enum a3xx_tex_swiz {
@@ -316,6 +316,7 @@ enum a3xx_tex_type {
316#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064 316#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
317 317
318#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080 318#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
319#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001
319 320
320#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081 321#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
321 322
@@ -549,6 +550,10 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
549 550
550#define REG_A3XX_CP_AHB_FAULT 0x0000054d 551#define REG_A3XX_CP_AHB_FAULT 0x0000054d
551 552
553#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
554
555#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
556
552#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040 557#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
553#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000 558#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
554#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000 559#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
@@ -556,6 +561,9 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
556#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000 561#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
557#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000 562#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
558#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000 563#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
564#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000
565#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000
566#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000
559 567
560#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044 568#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
561#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff 569#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
@@ -620,8 +628,26 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
620} 628}
621 629
622#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068 630#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
631#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
632#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
633static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
634{
635 return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
636}
637#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
638#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
639static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
640{
641 return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
642}
623 643
624#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069 644#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
645#define A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
646#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
647static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
648{
649 return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
650}
625 651
626#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c 652#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
627#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff 653#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
@@ -743,6 +769,7 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode va
743#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000 769#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
744 770
745#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1 771#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
772#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008
746#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0 773#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
747#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4 774#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
748static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val) 775static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
@@ -751,6 +778,10 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
751} 778}
752#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000 779#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
753#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000 780#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
781#define A3XX_RB_RENDER_CONTROL_XCOORD 0x00004000
782#define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000
783#define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000
784#define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000
754#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000 785#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
755#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000 786#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
756#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24 787#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
@@ -796,7 +827,7 @@ static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4
796#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020 827#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
797#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00 828#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
798#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8 829#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
799static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val) 830static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
800{ 831{
801 return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK; 832 return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
802} 833}
@@ -856,7 +887,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
856} 887}
857#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 888#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
858#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 889#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
859static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val) 890static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
860{ 891{
861 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; 892 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
862} 893}
@@ -874,7 +905,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
874} 905}
875#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 906#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
876#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 907#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
877static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val) 908static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
878{ 909{
879 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; 910 return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
880} 911}
@@ -957,17 +988,24 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples
957{ 988{
958 return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK; 989 return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
959} 990}
991#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008
960#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070 992#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
961#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4 993#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
962static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val) 994static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
963{ 995{
964 return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK; 996 return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
965} 997}
966#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00 998#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
967#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10 999#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
1000static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
1001{
1002 return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
1003}
1004#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
1005#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
968static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) 1006static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
969{ 1007{
970 return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK; 1008 return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
971} 1009}
972 1010
973#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed 1011#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
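
[ Ed. note: the GMEM_BASE field moves from bits 10..31 (mask 0xfffffc00) to bits 14..31 (mask 0xffffc000), and the helper now pre-shifts the byte address by 14 instead of 10 — i.e. the base is stored at 16KiB rather than 1KiB granularity, which also makes room for the new FASTCLEAR field at bits 8..11. The arithmetic in miniature: ]

#include <assert.h>
#include <stdint.h>

#define GMEM_BASE__MASK  0xffffc000u
#define GMEM_BASE__SHIFT 14

/* Byte address in, register encoding out: the low 14 bits (16KiB
 * alignment) are dropped before the value lands in bits 14..31. */
static inline uint32_t gmem_base_encode(uint32_t addr)
{
	return ((addr >> 14) << GMEM_BASE__SHIFT) & GMEM_BASE__MASK;
}

int main(void)
{
	assert(gmem_base_encode(0x4000) == 0x4000);  /* one 16KiB unit */
	assert(gmem_base_encode(0x3fff) == 0);       /* sub-alignment bits drop */
	return 0;
}
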
@@ -1005,6 +1043,12 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
1005{ 1043{
1006 return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK; 1044 return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
1007} 1045}
1046#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
1047#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
1048static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
1049{
1050 return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
1051}
1008#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000 1052#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
1009#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14 1053#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
1010static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val) 1054static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
@@ -1019,6 +1063,7 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endi
1019} 1063}
1020 1064
1021#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100 1065#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
1066#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
1022#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002 1067#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
1023#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004 1068#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
1024#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008 1069#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
@@ -1044,7 +1089,7 @@ static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
1044#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11 1089#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
1045static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) 1090static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
1046{ 1091{
1047 return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; 1092 return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
1048} 1093}
1049 1094
1050#define REG_A3XX_RB_DEPTH_PITCH 0x00002103 1095#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
@@ -1172,6 +1217,8 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
1172} 1217}
1173 1218
1174#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110 1219#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
1220#define A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001
1221#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
1175 1222
1176#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111 1223#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
1177 1224
@@ -1179,7 +1226,23 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
1179 1226
1180#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115 1227#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
1181 1228
1229#define REG_A3XX_VGT_BIN_BASE 0x000021e1
1230
1231#define REG_A3XX_VGT_BIN_SIZE 0x000021e2
1232
1182#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 1233#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
1234#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
1235#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
1236static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
1237{
1238 return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK;
1239}
1240#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
1241#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22
1242static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val)
1243{
1244 return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK;
1245}
1183 1246
1184#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea 1247#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
1185 1248
@@ -1203,6 +1266,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
1203 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK; 1266 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
1204} 1267}
1205#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 1268#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
1269#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
1206 1270
1207#define REG_A3XX_PC_RESTART_INDEX 0x000021ed 1271#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
1208 1272
@@ -1232,6 +1296,7 @@ static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize
1232} 1296}
1233#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100 1297#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
1234#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200 1298#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
1299#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
1235 1300
1236#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202 1301#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
1237#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000 1302#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
@@ -1242,6 +1307,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
1242} 1307}
1243 1308
1244#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203 1309#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
1310#define A3XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
1311#define A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
1312static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
1313{
1314 return ((val) << A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_REGID__MASK;
1315}
1245 1316
1246#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204 1317#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
1247#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff 1318#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
@@ -1312,10 +1383,36 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
1312} 1383}
1313 1384
1314#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a 1385#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
1386#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003
1387#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0
1388static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val)
1389{
1390 return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK;
1391}
1392#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc
1393#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2
1394static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val)
1395{
1396 return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK;
1397}
1398#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000
1399#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12
1400static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val)
1401{
1402 return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK;
1403}
1404#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000
1405#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22
1406static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
1407{
1408 return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
1409}
1410
1411static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; }
1315 1412
1316#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b 1413static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
1317 1414
1318#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c 1415static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; }
1319 1416
1320#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211 1417#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
1321 1418
@@ -1323,7 +1420,9 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
1323 1420
1324#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214 1421#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
1325 1422
1326#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215 1423static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; }
1424
1425static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
1327 1426
1328#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216 1427#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
1329 1428
@@ -1438,6 +1537,12 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
1438{ 1537{
1439 return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK; 1538 return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
1440} 1539}
1540#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
1541#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
1542static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
1543{
1544 return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK;
1545}
1441#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000 1546#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
1442#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24 1547#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
1443static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val) 1548static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
@@ -1462,12 +1567,13 @@ static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val
1462} 1567}
1463 1568
1464#define REG_A3XX_VPC_ATTR 0x00002280 1569#define REG_A3XX_VPC_ATTR 0x00002280
1465#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff 1570#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
1466#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0 1571#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
1467static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val) 1572static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
1468{ 1573{
1469 return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK; 1574 return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
1470} 1575}
1576#define A3XX_VPC_ATTR_PSIZE 0x00000200
1471#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000 1577#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
1472#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12 1578#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
1473static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val) 1579static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
@@ -1522,11 +1628,11 @@ static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
1522{ 1628{
1523 return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK; 1629 return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
1524} 1630}
1525#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000 1631#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000
1526#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22 1632#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22
1527static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val) 1633static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val)
1528{ 1634{
1529 return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK; 1635 return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK;
1530} 1636}
1531 1637
1532#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4 1638#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
@@ -1569,6 +1675,7 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1569} 1675}
1570#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000 1676#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1571#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000 1677#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
1678#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000
1572#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000 1679#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
1573#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24 1680#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
1574static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val) 1681static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -1742,6 +1849,7 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1742} 1849}
1743#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000 1850#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1744#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000 1851#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
1852#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000
1745#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000 1853#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
1746#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24 1854#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
1747static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val) 1855static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
@@ -1802,6 +1910,13 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1802#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9 1910#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
1803 1911
1804#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec 1912#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
1913#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
1914#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
1915#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
1916static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
1917{
1918 return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
1919}
1805 1920
1806static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; } 1921static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
1807 1922
@@ -1914,6 +2029,42 @@ static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
1914 2029
1915#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f 2030#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
1916 2031
2032#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070
2033#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001
2034#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002
2035#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004
2036#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008
2037#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010
2038
2039#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071
2040#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001
2041#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002
2042#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004
2043#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008
2044#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010
2045
2046#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072
2047
2048#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073
2049
2050#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074
2051
2052#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075
2053
2054#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076
2055
2056#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077
2057
2058#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078
2059
2060#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079
2061
2062#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a
2063
2064#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b
2065
2066#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c
2067
1917#define REG_A3XX_VSC_BIN_SIZE 0x00000c01 2068#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
1918#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f 2069#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
1919#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0 2070#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -2080,6 +2231,8 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
2080} 2231}
2081#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000 2232#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
2082 2233
2234#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6
2235
2083#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4 2236#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
2084 2237
2085#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5 2238#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
@@ -2117,6 +2270,39 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
2117#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9 2270#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
2118 2271
2119#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc 2272#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
2273#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
2274#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
2275static inline uint32_t A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
2276{
2277 return ((val) << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
2278}
2279#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
2280#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
2281static inline uint32_t A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
2282{
2283 return ((val) << A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
2284}
2285#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
2286#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
2287static inline uint32_t A3XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
2288{
2289 return ((val) << A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
2290}
2291#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
2292#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
2293static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
2294{
2295 return ((val) << A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
2296}
2297#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
2298#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
2299#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
2300#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
2301#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
2302static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
2303{
2304 return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
2305}
2120 2306
2121#define REG_A3XX_VGT_IMMED_DATA 0x000021fd 2307#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
2122 2308
@@ -2152,6 +2338,12 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
 {
 	return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
 }
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20
+static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val)
+{
+	return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
+}
 #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
 
 #define REG_A3XX_TEX_SAMP_1 0x00000001
@@ -2170,6 +2362,7 @@ static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
 
 #define REG_A3XX_TEX_CONST_0 0x00000000
 #define A3XX_TEX_CONST_0_TILED 0x00000001
+#define A3XX_TEX_CONST_0_SRGB 0x00000004
 #define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
 #define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
 static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
@@ -2206,6 +2399,7 @@ static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
 {
 	return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
 }
+#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000
 #define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
 #define A3XX_TEX_CONST_0_TYPE__SHIFT 30
 static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
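
These generated rnndb headers follow one pattern throughout: every multi-bit register field gets a __MASK/__SHIFT pair plus an inline builder that shifts the value into place and masks off any overflow, while single-bit fields are plain #defines OR'd in directly. A minimal, self-contained sketch of how such builders compose into a register word; the two fields are copied from the A3XX_VGT_DRAW_INITIATOR definitions above, and the numeric inputs are illustrative stand-ins for the pc_di_* enum values the kernel header actually takes:

#include <stdint.h>
#include <stdio.h>

/* copied from the a3xx.xml.h additions above */
#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK    0x0000003f
#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT   0
#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK  0xffff0000
#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16

static uint32_t prim_type(uint32_t val)
{
	return (val << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) &
		A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
}

static uint32_t num_indices(uint32_t val)
{
	return (val << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) &
		A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
}

int main(void)
{
	/* OR the per-field builders together to form the register word;
	 * prim type 4 and 36 indices are made-up example values */
	uint32_t dword = prim_type(4) | num_indices(36);
	printf("VGT_DRAW_INITIATOR = 0x%08x\n", dword);	/* 0x00240004 */
	return 0;
}

Because each builder masks the shifted value, an out-of-range input truncates within its own field rather than corrupting its neighbours.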
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 942e09d898a8..2773600c9488 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -392,13 +392,10 @@ static const unsigned int a3xx_registers[] = {
 #ifdef CONFIG_DEBUG_FS
 static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
-	struct drm_device *dev = gpu->dev;
 	int i;
 
 	adreno_show(gpu, m);
 
-	mutex_lock(&dev->struct_mutex);
-
 	gpu->funcs->pm_resume(gpu);
 
 	seq_printf(m, "status: %08x\n",
@@ -418,8 +415,6 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
 	}
 
 	gpu->funcs->pm_suspend(gpu);
-
-	mutex_unlock(&dev->struct_mutex);
 }
 #endif
 
@@ -685,6 +680,8 @@ static int a3xx_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,adreno-3xx" },
+	/* for backwards compat w/ downstream kgsl DT files: */
 	{ .compatible = "qcom,kgsl-3d0" },
 	{}
 };
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index bb9a8ca0507b..85ff66cbddd6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -19,6 +19,11 @@
 #define __A3XX_GPU_H__
 
 #include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
 #include "a3xx.xml.h"
 
 struct a3xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index d6e6ce2d1abd..9de19ac2e86c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
 
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
@@ -87,15 +87,6 @@ enum adreno_rb_blend_factor {
 	FACTOR_SRC_ALPHA_SATURATE = 16,
 };
 
-enum adreno_rb_blend_opcode {
-	BLEND_DST_PLUS_SRC = 0,
-	BLEND_SRC_MINUS_DST = 1,
-	BLEND_MIN_DST_SRC = 2,
-	BLEND_MAX_DST_SRC = 3,
-	BLEND_DST_MINUS_SRC = 4,
-	BLEND_DST_PLUS_SRC_BIAS = 5,
-};
-
 enum adreno_rb_surface_endian {
 	ENDIAN_NONE = 0,
 	ENDIAN_8IN16 = 1,
@@ -116,6 +107,39 @@ enum adreno_rb_depth_format {
 	DEPTHX_24_8 = 1,
 };
 
+enum adreno_rb_copy_control_mode {
+	RB_COPY_RESOLVE = 1,
+	RB_COPY_CLEAR = 2,
+	RB_COPY_DEPTH_STENCIL = 5,
+};
+
+enum a3xx_render_mode {
+	RB_RENDERING_PASS = 0,
+	RB_TILING_PASS = 1,
+	RB_RESOLVE_PASS = 2,
+	RB_COMPUTE_PASS = 3,
+};
+
+enum a3xx_msaa_samples {
+	MSAA_ONE = 0,
+	MSAA_TWO = 1,
+	MSAA_FOUR = 2,
+};
+
+enum a3xx_threadmode {
+	MULTI = 0,
+	SINGLE = 1,
+};
+
+enum a3xx_instrbuffermode {
+	BUFFER = 1,
+};
+
+enum a3xx_threadsize {
+	TWO_QUADS = 0,
+	FOUR_QUADS = 1,
+};
+
 #define REG_AXXX_CP_RB_BASE 0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL 0x000001c1
@@ -264,6 +288,8 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
 #define REG_AXXX_CP_INT_ACK 0x000001f4
 
 #define REG_AXXX_CP_ME_CNTL 0x000001f6
+#define AXXX_CP_ME_CNTL_BUSY 0x20000000
+#define AXXX_CP_ME_CNTL_HALT 0x10000000
 
 #define REG_AXXX_CP_ME_STATUS 0x000001f7
 
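
The two new AXXX_CP_ME_CNTL bits are plain flags in the CP microengine control register. A hedged sketch of how such flag bits are typically tested and set; read_reg()/write_reg() here are hypothetical stand-ins backed by a simulated register, not the msm driver's MMIO helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* from adreno_common.xml.h above */
#define REG_AXXX_CP_ME_CNTL  0x000001f6
#define AXXX_CP_ME_CNTL_BUSY 0x20000000
#define AXXX_CP_ME_CNTL_HALT 0x10000000

/* simulated backing store so the sketch runs standalone */
static uint32_t me_cntl_shadow;
static uint32_t read_reg(uint32_t reg) { (void)reg; return me_cntl_shadow; }
static void write_reg(uint32_t reg, uint32_t val) { (void)reg; me_cntl_shadow = val; }

int main(void)
{
	/* halt the microengine: set HALT without disturbing other bits */
	write_reg(REG_AXXX_CP_ME_CNTL,
		  read_reg(REG_AXXX_CP_ME_CNTL) | AXXX_CP_ME_CNTL_HALT);

	bool busy = read_reg(REG_AXXX_CP_ME_CNTL) & AXXX_CP_ME_CNTL_BUSY;
	printf("ME busy=%d cntl=0x%08x\n", busy, read_reg(REG_AXXX_CP_ME_CNTL));
	return 0;
}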
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 28ca8cd8b09e..655ce5b14ad0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -91,9 +91,17 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 int adreno_hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
 
 	DBG("%s", gpu->name);
 
+	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	if (ret) {
+		gpu->rb_iova = 0;
+		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+		return ret;
+	}
+
 	/* Setup REG_CP_RB_CNTL: */
 	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
 			/* size is log2(quad-words): */
@@ -362,8 +370,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
+	mutex_lock(&drm->struct_mutex);
 	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
 			MSM_BO_UNCACHED);
+	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->memptrs_bo)) {
 		ret = PTR_ERR(gpu->memptrs_bo);
 		gpu->memptrs_bo = NULL;
@@ -371,13 +381,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+	gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
 	if (!gpu->memptrs) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
 	}
 
-	ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+	ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
 			&gpu->memptrs_iova);
 	if (ret) {
 		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index ae992c71703f..4eee0ec8f069 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
 
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
@@ -105,6 +105,7 @@ enum pc_di_index_size {
 
 enum pc_di_vis_cull_mode {
 	IGNORE_VISIBILITY = 0,
+	USE_VISIBILITY = 1,
 };
 
 enum adreno_pm4_packet_type {
@@ -163,6 +164,11 @@ enum adreno_pm4_type3_packets {
 	CP_SET_BIN = 76,
 	CP_TEST_TWO_MEMS = 113,
 	CP_WAIT_FOR_ME = 19,
+	CP_SET_DRAW_STATE = 67,
+	CP_DRAW_INDX_OFFSET = 56,
+	CP_DRAW_INDIRECT = 40,
+	CP_DRAW_INDX_INDIRECT = 41,
+	CP_DRAW_AUTO = 36,
 	IN_IB_PREFETCH_END = 23,
 	IN_SUBBLK_PREFETCH = 31,
 	IN_INSTR_PREFETCH = 32,
@@ -232,6 +238,211 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
 	return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
 }
 
+#define REG_CP_DRAW_INDX_0 0x00000000
+#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_1 0x00000001
+#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+	return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+	return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+	return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+	return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_0 0x00000000
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_1 0x00000001
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+	return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+	return ((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+	return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+	return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_2 0x00000002
+#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000700
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK;
+}
+
+#define REG_CP_SET_DRAW_STATE_0 0x00000000
+#define CP_SET_DRAW_STATE_0_COUNT__MASK 0x0000ffff
+#define CP_SET_DRAW_STATE_0_COUNT__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE_0_COUNT(uint32_t val)
+{
+	return ((val) << CP_SET_DRAW_STATE_0_COUNT__SHIFT) & CP_SET_DRAW_STATE_0_COUNT__MASK;
+}
+#define CP_SET_DRAW_STATE_0_DIRTY 0x00010000
+#define CP_SET_DRAW_STATE_0_DISABLE 0x00020000
+#define CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS 0x00040000
+#define CP_SET_DRAW_STATE_0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE_0_GROUP_ID__MASK 0x1f000000
+#define CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT 24
+static inline uint32_t CP_SET_DRAW_STATE_0_GROUP_ID(uint32_t val)
+{
+	return ((val) << CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE_0_GROUP_ID__MASK;
+}
+
+#define REG_CP_SET_DRAW_STATE_1 0x00000001
+#define CP_SET_DRAW_STATE_1_ADDR__MASK 0xffffffff
+#define CP_SET_DRAW_STATE_1_ADDR__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE_1_ADDR(uint32_t val)
+{
+	return ((val) << CP_SET_DRAW_STATE_1_ADDR__SHIFT) & CP_SET_DRAW_STATE_1_ADDR__MASK;
+}
+
 #define REG_CP_SET_BIN_0 0x00000000
 
 #define REG_CP_SET_BIN_1 0x00000001
@@ -262,5 +473,21 @@ static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
 	return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
 }
 
+#define REG_CP_SET_BIN_DATA_0 0x00000000
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val)
+{
+	return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA_1 0x00000001
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
+{
+	return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
+}
+
 
 #endif /* ADRENO_PM4_XML */
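
The CP_DRAW_INDX_OFFSET_* definitions above describe the payload dwords that follow a type-3 packet header for the newly added CP_DRAW_INDX_OFFSET opcode (56). A self-contained sketch of framing such a packet: the two field builders are copied from the header above, the field values are illustrative, and the type-3 header encoding (top two bits set to 3, dword count minus one, opcode byte) is restated from the usual adreno PM4 convention as an assumption, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

/* from the enum additions above */
#define CP_DRAW_INDX_OFFSET 56

/* assumed PM4 type-3 header layout; see caveat in the lead-in */
static uint32_t pkt3_hdr(uint32_t opcode, uint32_t cnt)
{
	return (3u << 30) | ((cnt - 1) << 16) | ((opcode & 0xff) << 8);
}

/* copied from the CP_DRAW_INDX_OFFSET_0 definitions above */
#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT   0
#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK    0x0000003f
#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16
#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK  0xffff0000

int main(void)
{
	uint32_t pkt[4];

	pkt[0] = pkt3_hdr(CP_DRAW_INDX_OFFSET, 3);	/* header + 3 payload dwords */
	/* dword 0: illustrative prim type 4 with 36 indices */
	pkt[1] = ((4u << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) &
			CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK) |
		 ((36u << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) &
			CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK);
	pkt[2] = 0;	/* REG_CP_DRAW_INDX_OFFSET_1: no fields defined above */
	pkt[3] = 36;	/* REG_CP_DRAW_INDX_OFFSET_2: NUM_INDICES per the header */

	for (int i = 0; i < 4; i++)
		printf("0x%08x\n", pkt[i]);
	return 0;
}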
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 87be647e3825..0f1f5b9459a5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 747a6ef4211f..d468f86f637c 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 48e03acf19bf..da8740054cdf 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 7f7aadef8a82..a125a7e32742 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -123,7 +123,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
 	for (i = 0; i < config->hpd_reg_cnt; i++) {
 		struct regulator *reg;
 
-		reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
+		reg = devm_regulator_get_exclusive(&pdev->dev,
+				config->hpd_reg_names[i]);
 		if (IS_ERR(reg)) {
 			ret = PTR_ERR(reg);
 			dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
@@ -138,7 +139,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
 	for (i = 0; i < config->pwr_reg_cnt; i++) {
 		struct regulator *reg;
 
-		reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
+		reg = devm_regulator_get_exclusive(&pdev->dev,
+				config->pwr_reg_names[i]);
 		if (IS_ERR(reg)) {
 			ret = PTR_ERR(reg);
 			dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
@@ -266,37 +268,56 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	int gpio = of_get_named_gpio(of_node, name, 0);
 	if (gpio < 0) {
-		dev_err(dev, "failed to get gpio: %s (%d)\n",
-				name, gpio);
-		gpio = -1;
+		char name2[32];
+		snprintf(name2, sizeof(name2), "%s-gpio", name);
+		gpio = of_get_named_gpio(of_node, name2, 0);
+		if (gpio < 0) {
+			dev_err(dev, "failed to get gpio: %s (%d)\n",
+					name, gpio);
+			gpio = -1;
+		}
 	}
 	return gpio;
 }
 
-	/* TODO actually use DT.. */
-	static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
-	static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
-	static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
-	static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
-	static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+	if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
+		static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
+		static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
+		static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+		static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
+		static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+		config.phy_init = hdmi_phy_8x74_init;
+		config.hpd_reg_names = hpd_reg_names;
+		config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+		config.pwr_reg_names = pwr_reg_names;
+		config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
+		config.hpd_clk_names = hpd_clk_names;
+		config.hpd_freq = hpd_clk_freq;
+		config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+		config.pwr_clk_names = pwr_clk_names;
+		config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
+		config.shared_irq = true;
+	} else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
+		static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
+		static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
+		config.phy_init = hdmi_phy_8960_init;
+		config.hpd_reg_names = hpd_reg_names;
+		config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+		config.hpd_clk_names = hpd_clk_names;
+		config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+	} else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8660")) {
+		config.phy_init = hdmi_phy_8x60_init;
+	} else {
+		dev_err(dev, "unknown phy: %s\n", of_node->name);
+	}
 
-	config.phy_init = hdmi_phy_8x74_init;
 	config.mmio_name = "core_physical";
-	config.hpd_reg_names = hpd_reg_names;
-	config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
-	config.pwr_reg_names = pwr_reg_names;
-	config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
-	config.hpd_clk_names = hpd_clk_names;
-	config.hpd_freq = hpd_clk_freq;
-	config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
-	config.pwr_clk_names = pwr_clk_names;
-	config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
 	config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
 	config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
 	config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
 	config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
 	config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
-	config.shared_irq = true;
+	config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm");
 
 #else
 	static const char *hpd_clk_names[] = {
@@ -373,7 +394,9 @@ static int hdmi_dev_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id dt_match[] = {
-	{ .compatible = "qcom,hdmi-tx" },
+	{ .compatible = "qcom,hdmi-tx-8074" },
+	{ .compatible = "qcom,hdmi-tx-8960" },
+	{ .compatible = "qcom,hdmi-tx-8660" },
 	{}
 };
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 9d7723c6528a..b981995410b5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -96,6 +96,7 @@ struct hdmi_platform_config {
 
 	/* gpio's: */
 	int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+	int mux_lpm_gpio;
 
 	/* older devices had their own irq, mdp5+ it is shared w/ mdp: */
 	bool shared_irq;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e2636582cfd7..e89fe053d375 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
 
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
@@ -148,9 +148,9 @@ static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*
 
 static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
 
-static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
 
-static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR_0(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
 #define HDMI_ACR_0_CTS__MASK 0xfffff000
 #define HDMI_ACR_0_CTS__SHIFT 12
 static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
@@ -158,7 +158,7 @@ static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
 	return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
 }
 
-static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR_1(enum hdmi_acr_cts i0) { return 0x000000c8 + 0x8*i0; }
 #define HDMI_ACR_1_N__MASK 0xffffffff
 #define HDMI_ACR_1_N__SHIFT 0
 static inline uint32_t HDMI_ACR_1_N(uint32_t val)
@@ -552,6 +552,103 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
 #define REG_HDMI_8960_PHY_REG11 0x0000042c
 
 #define REG_HDMI_8960_PHY_REG12 0x00000430
+#define HDMI_8960_PHY_REG12_SW_RESET 0x00000020
+#define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080
+
+#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000434
+
+#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000438
+
+#define REG_HDMI_8960_PHY_REG_MISC0 0x0000043c
+
+#define REG_HDMI_8960_PHY_REG13 0x00000440
+
+#define REG_HDMI_8960_PHY_REG14 0x00000444
+
+#define REG_HDMI_8960_PHY_REG15 0x00000448
+
+#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000500
+
+#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000504
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000508
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000050c
+
+#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000510
+
+#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000514
+
+#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000518
+#define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002
+#define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000051c
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000520
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000524
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000528
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000052c
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000530
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000534
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000538
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000053c
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000540
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000544
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000548
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000054c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000550
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000554
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000558
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000055c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000560
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000564
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000568
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000056c
+
+#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000570
+
+#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000574
+
+#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000578
+
+#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000057c
+
+#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000580
+
+#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000584
+
+#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000588
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000058c
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000590
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000594
+
+#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000598
+#define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001
+
+#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000059c
 
 #define REG_HDMI_8x74_ANA_CFG0 0x00000000
 
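
The hdmi_phy_8960.c diff further below programs these PLL registers per pixel clock via a rate table sorted from highest to lowest frequency; its find_rate() walks that table and settles on the smallest entry that still satisfies the requested rate, clamping to the lowest entry when the request falls below the table. A self-contained sketch of that lookup with a reduced, illustrative table (the real pll_rate entries also carry the per-rate register writes):

#include <stdio.h>

/* reduced, illustrative copy of the descending rate table */
static const unsigned long freqtbl[] = {
	148500000, 108000000, 74250000, 27000000,
};
#define N (sizeof(freqtbl) / sizeof(freqtbl[0]))

/* same walk as find_rate() in the diff below: the first entry the
 * request exceeds ends the scan, and the entry just before it wins */
static unsigned long find_rate(unsigned long rate)
{
	unsigned int i;
	for (i = 1; i < N; i++)
		if (rate > freqtbl[i])
			return freqtbl[i - 1];
	return freqtbl[i - 1];	/* below the table: clamp to lowest rate */
}

int main(void)
{
	printf("%lu\n", find_rate(74250000));	/* exact hit: 74250000 */
	printf("%lu\n", find_rate(100000000));	/* rounds up to 108000000 */
	printf("%lu\n", find_rate(20000000));	/* clamps to 27000000 */
	return 0;
}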
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 28f7e3ec6c28..4aca2a3c667c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -63,7 +63,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 			ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
 			if (ret) {
 				dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
-					"HDMI_MUX_SEL", config->mux_en_gpio, ret);
+					"HDMI_MUX_EN", config->mux_en_gpio, ret);
 				goto error4;
 			}
 			gpio_set_value_cansleep(config->mux_en_gpio, 1);
@@ -78,6 +78,19 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 			}
 			gpio_set_value_cansleep(config->mux_sel_gpio, 0);
 		}
+
+		if (config->mux_lpm_gpio != -1) {
+			ret = gpio_request(config->mux_lpm_gpio,
+					"HDMI_MUX_LPM");
+			if (ret) {
+				dev_err(dev->dev,
+					"'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_MUX_LPM",
+					config->mux_lpm_gpio, ret);
+				goto error6;
+			}
+			gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+		}
 		DBG("gpio on");
 	} else {
 		gpio_free(config->ddc_clk_gpio);
@@ -93,11 +106,19 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 			gpio_set_value_cansleep(config->mux_sel_gpio, 1);
 			gpio_free(config->mux_sel_gpio);
 		}
+
+		if (config->mux_lpm_gpio != -1) {
+			gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+			gpio_free(config->mux_lpm_gpio);
+		}
 		DBG("gpio off");
 	}
 
 	return 0;
 
+error6:
+	if (config->mux_sel_gpio != -1)
+		gpio_free(config->mux_sel_gpio);
 error5:
 	if (config->mux_en_gpio != -1)
 		gpio_free(config->mux_en_gpio);
@@ -306,7 +327,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
 
 	hdp_disable(hdmi_connector);
 
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 
 	hdmi_unreference(hdmi_connector->hdmi);
@@ -416,7 +437,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	ret = hpd_enable(hdmi_connector);
 	if (ret) {
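
The gpio_config() hunks above extend the function's goto-unwind error chain: the new HDMI_MUX_LPM gpio gets its own error6 label, so a failure at that step frees exactly the resources already acquired, with the labels falling through in reverse order of acquisition. A generic, self-contained sketch of the idiom; acquire()/release() are stand-ins, not the gpiolib API:

#include <stdio.h>

/* stand-in resources; the driver uses gpio_request()/gpio_free() */
static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
static void release(const char *name) { printf("release %s\n", name); }

static int configure(void)
{
	int ret;

	ret = acquire("mux_en");
	if (ret)
		goto error5;
	ret = acquire("mux_lpm");	/* the newly added step */
	if (ret)
		goto error6;
	return 0;

	/* unwind: each label frees only what was held before the
	 * failing step, then falls through to the earlier labels */
error6:
	release("mux_en");
error5:
	return ret;
}

int main(void) { return configure(); }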
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index e5b7ed5b8f01..902d7685d441 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,13 +15,370 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/clk.h>
19#include <linux/clk-provider.h>
20
18#include "hdmi.h" 21#include "hdmi.h"
19 22
20struct hdmi_phy_8960 { 23struct hdmi_phy_8960 {
21 struct hdmi_phy base; 24 struct hdmi_phy base;
22 struct hdmi *hdmi; 25 struct hdmi *hdmi;
26 struct clk_hw pll_hw;
27 struct clk *pll;
28 unsigned long pixclk;
23}; 29};
24#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base) 30#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
31#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
32
33/*
34 * HDMI PLL:
35 *
36 * To get the parent clock setup properly, we need to plug in hdmi pll
37 * configuration into common-clock-framework.
38 */
39
40struct pll_rate {
41 unsigned long rate;
42 struct {
43 uint32_t val;
44 uint32_t reg;
45 } conf[32];
46};
47
48/* NOTE: keep sorted highest freq to lowest: */
49static const struct pll_rate freqtbl[] = {
50 /* 1080p60/1080p50 case */
51 { 148500000, {
52 { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
53 { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
54 { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
55 { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
56 { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
57 { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
58 { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
59 { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
60 { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
61 { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
62 { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
63 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
64 { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
65 { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
66 { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
67 { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
68 { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
69 { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
70 { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
71 { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
72 { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
73 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
74 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
75 { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
76 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
77 { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
78 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
79 { 0, 0 } }
80 },
81 { 108000000, {
82 { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
83 { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
84 { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
85 { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
86 { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
87 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
88 { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
89 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
90 { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
91 { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
92 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
93 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
94 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
95 { 0, 0 } }
96 },
97 /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
98 { 74250000, {
99 { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
100 { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
101 { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
102 { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
103 { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
104 { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
105 { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
106 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
107 { 0, 0 } }
108 },
109 { 65000000, {
110 { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
111 { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
112 { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
113 { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
114 { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
115 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
116 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
117 { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
118 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
119 { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
120 { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
121 { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
122 { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
123 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
124 { 0, 0 } }
125 },
126 /* 480p60/480i60 */
127 { 27030000, {
128 { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
129 { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
130 { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
131 { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
132 { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
133 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
134 { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
135 { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
136 { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
137 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
138 { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
139 { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
140 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
141 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
142 { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
143 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
144 { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
145 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
146 { 0, 0 } }
147 },
148 /* 576p50/576i50 */
149 { 27000000, {
150 { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
151 { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
152 { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
153 { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
154 { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
155 { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
156 { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
157 { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
158 { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
159 { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
160 { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
161 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
162 { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
163 { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
164 { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
165 { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
166 { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
167 { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
168 { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
169 { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
170 { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
171 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
172 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
173 { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
174 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
175 { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
176 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
177 { 0, 0 } }
178 },
179 /* 640x480p60 */
180 { 25200000, {
181 { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
182 { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
183 { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
184 { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
185 { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
186 { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
187 { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
188 { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
189 { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
190 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
191 { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
192 { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
193 { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
194 { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
195 { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
196 { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
197 { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
198 { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
199 { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
200 { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
201 { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
202 { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
203 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
204 { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
205 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
206 { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
207 { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
208 { 0, 0 } }
209 },
210};
211
212static int hdmi_pll_enable(struct clk_hw *hw)
213{
214 struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
215 struct hdmi *hdmi = phy_8960->hdmi;
216 int timeout_count, pll_lock_retry = 10;
217 unsigned int val;
218
219 DBG("");
220
221 /* Assert PLL S/W reset */
222 hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
223 hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
224 hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
225
226 /* Wait for a short time before de-asserting
227 * to allow the hardware to complete its job.
228 * This much of delay should be fine for hardware
229 * to assert and de-assert.
230 */
231 udelay(10);
232
233 /* De-assert PLL S/W reset */
234 hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
235
236 val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
237 val |= HDMI_8960_PHY_REG12_SW_RESET;
238 /* Assert PHY S/W reset */
239 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
240 val &= ~HDMI_8960_PHY_REG12_SW_RESET;
241 /* Wait for a short time before de-asserting
242 to allow the hardware to complete its job.
243 This much of delay should be fine for hardware
244 to assert and de-assert. */
245 udelay(10);
246 /* De-assert PHY S/W reset */
247 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
248 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x3f);
249
250 val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
251 val |= HDMI_8960_PHY_REG12_PWRDN_B;
252 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
253 /* Wait 10 us for the PHY global power to come up */
254 mb();
255 udelay(10);
256
257 val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
258 val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
259 val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
260 hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
261 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80);
262
263 timeout_count = 1000;
264 while (--pll_lock_retry > 0) {
265
266 /* are we there yet? */
267 val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0);
268 if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
269 break;
270
271 udelay(1);
272
273 if (--timeout_count > 0)
274 continue;
275
276 /*
277 * The PLL has still not locked.
278 * Do a software reset and try again;
279 * assert the PLL S/W reset first.
280 */
281 hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
282 udelay(10);
283 hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
284
285 /*
286 * Wait for a short duration for the PLL calibration
287 * before checking if the PLL gets locked
288 */
289 udelay(350);
290
291 timeout_count = 1000;
292 }
293
294 return 0;
295}
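
The enable path above nests two timeouts: an inner 1 us poll bounded by timeout_count, and an outer retry counter that pulses the PLL S/W reset and waits out a ~350 us calibration window whenever the inner budget runs dry. A minimal userspace model of that control flow, with hypothetical read_status()/assert_reset()/deassert_reset() helpers standing in for the hdmi_read()/hdmi_write() register accesses, might look like this:

#include <stdbool.h>
#include <stdio.h>

#define PLL_LOCK_BIT (1u << 0)

static unsigned int fake_status;

static unsigned int read_status(void) { return fake_status; }
static void assert_reset(void)   { fake_status = 0; }
static void deassert_reset(void) { fake_status = PLL_LOCK_BIT; } /* pretend it locks */
static void delay_us(unsigned int us) { (void)us; } /* busy-wait in real code */

static bool pll_wait_locked(void)
{
	int timeout_count = 1000;
	int pll_lock_retry = 10;

	while (--pll_lock_retry > 0) {
		if (read_status() & PLL_LOCK_BIT)
			return true;              /* locked */

		delay_us(1);
		if (--timeout_count > 0)
			continue;                 /* keep polling */

		/* inner budget exhausted: pulse S/W reset and retry */
		assert_reset();
		delay_us(10);
		deassert_reset();
		delay_us(350);                    /* PLL calibration window */
		timeout_count = 1000;
	}
	return false;
}

int main(void)
{
	printf("locked: %d\n", pll_wait_locked());
	return 0;
}

As in the driver, a lock observed on any outer iteration exits immediately; note that hdmi_pll_enable() itself returns 0 even when the retries are exhausted.
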
296
297static void hdmi_pll_disable(struct clk_hw *hw)
298{
299 struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
300 struct hdmi *hdmi = phy_8960->hdmi;
301 unsigned int val;
302
303 DBG("");
304
305 val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
306 val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
307 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
308
309 val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
310 val |= HDMI_8960_PHY_REG12_SW_RESET;
311 val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
312 hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
313 /* Make sure HDMI PHY/PLL are powered down */
314 mb();
315}
316
317static const struct pll_rate *find_rate(unsigned long rate)
318{
319 int i;
320 for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
321 if (rate > freqtbl[i].rate)
322 return &freqtbl[i-1];
323 return &freqtbl[i-1];
324}
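
find_rate() relies on freqtbl being sorted in descending order (the invariant checked at init time further below): scanning from the top, it returns the smallest table rate that is still >= the requested rate, falling back to the slowest entry for anything below the minimum. A self-contained sketch, with illustrative table values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct pll_rate { unsigned long rate; };

/* illustrative only; must stay in descending order */
static const struct pll_rate freqtbl[] = {
	{ 148500000 },
	{  74250000 },
	{  27030000 },
	{  27000000 },
	{  25200000 },
};

static const struct pll_rate *find_rate(unsigned long rate)
{
	unsigned int i;

	for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
		if (rate > freqtbl[i].rate)
			return &freqtbl[i - 1];
	return &freqtbl[i - 1];        /* slowest supported rate */
}

int main(void)
{
	/* 65 MHz is not in the table: rounds up to 74.25 MHz */
	printf("%lu\n", find_rate(65000000)->rate);
	/* anything at or below the minimum maps to 25.2 MHz */
	printf("%lu\n", find_rate(10000000)->rate);
	return 0;
}
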
325
326static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
327 unsigned long parent_rate)
328{
329 struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
330 return phy_8960->pixclk;
331}
332
333static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
334 unsigned long *parent_rate)
335{
336 const struct pll_rate *pll_rate = find_rate(rate);
337 return pll_rate->rate;
338}
339
340static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
341 unsigned long parent_rate)
342{
343 struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
344 struct hdmi *hdmi = phy_8960->hdmi;
345 const struct pll_rate *pll_rate = find_rate(rate);
346 int i;
347
348 DBG("rate=%lu", rate);
349
350 for (i = 0; pll_rate->conf[i].reg; i++)
351 hdmi_write(hdmi, pll_rate->conf[i].reg, pll_rate->conf[i].val);
352
353 phy_8960->pixclk = rate;
354
355 return 0;
356}
357
358
359static const struct clk_ops hdmi_pll_ops = {
360 .enable = hdmi_pll_enable,
361 .disable = hdmi_pll_disable,
362 .recalc_rate = hdmi_pll_recalc_rate,
363 .round_rate = hdmi_pll_round_rate,
364 .set_rate = hdmi_pll_set_rate,
365};
366
367static const char *hdmi_pll_parents[] = {
368 "pxo",
369};
370
371static struct clk_init_data pll_init = {
372 .name = "hdmi_pll",
373 .ops = &hdmi_pll_ops,
374 .parent_names = hdmi_pll_parents,
375 .num_parents = ARRAY_SIZE(hdmi_pll_parents),
376};
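
With these ops registered, the common clock framework drives the PLL on behalf of consumers: clk_set_rate() goes through .round_rate and then .set_rate, and clk_prepare_enable() lands in .enable. A hypothetical consumer-side sketch (the con_id "hdmi_pll" and the surrounding error handling are assumptions, not taken from the driver):

#include <linux/clk.h>
#include <linux/err.h>

static int hdmi_pixclk_setup(struct device *dev, unsigned long rate)
{
	struct clk *pixclk;
	int ret;

	/* con_id is an assumption; real lookup goes via DT/clkdev */
	pixclk = devm_clk_get(dev, "hdmi_pll");
	if (IS_ERR(pixclk))
		return PTR_ERR(pixclk);

	/* consults hdmi_pll_round_rate(), then hdmi_pll_set_rate() */
	ret = clk_set_rate(pixclk, rate);
	if (ret)
		return ret;

	/* ends up in hdmi_pll_enable(), which programs and locks the PLL */
	return clk_prepare_enable(pixclk);
}
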
377
378
379/*
380 * HDMI Phy:
381 */
25 382
26static void hdmi_phy_8960_destroy(struct hdmi_phy *phy) 383static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
27{ 384{
@@ -86,6 +443,9 @@ static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
86 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); 443 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
87 struct hdmi *hdmi = phy_8960->hdmi; 444 struct hdmi *hdmi = phy_8960->hdmi;
88 445
446 DBG("pixclock: %lu", pixclock);
447
448 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x00);
89 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b); 449 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
90 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2); 450 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
91 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00); 451 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
@@ -104,6 +464,8 @@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
104 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); 464 struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
105 struct hdmi *hdmi = phy_8960->hdmi; 465 struct hdmi *hdmi = phy_8960->hdmi;
106 466
467 DBG("");
468
107 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f); 469 hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
108} 470}
109 471
@@ -118,7 +480,12 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
118{ 480{
119 struct hdmi_phy_8960 *phy_8960; 481 struct hdmi_phy_8960 *phy_8960;
120 struct hdmi_phy *phy = NULL; 482 struct hdmi_phy *phy = NULL;
121 int ret; 483 int ret, i;
484
485 /* sanity check: */
486 for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
487 if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
488 return ERR_PTR(-EINVAL);
122 489
123 phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL); 490 phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
124 if (!phy_8960) { 491 if (!phy_8960) {
@@ -132,6 +499,14 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
132 499
133 phy_8960->hdmi = hdmi; 500 phy_8960->hdmi = hdmi;
134 501
502 phy_8960->pll_hw.init = &pll_init;
503 phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
504 if (IS_ERR(phy_8960->pll)) {
505 ret = PTR_ERR(phy_8960->pll);
506 phy_8960->pll = NULL;
507 goto fail;
508 }
509
135 return phy; 510 return phy;
136 511
137fail: 512fail:
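
The init path above leans on the kernel's ERR_PTR encoding: devm_clk_register() returns either a valid struct clk pointer or a negative errno folded into the pointer value, which IS_ERR()/PTR_ERR() unpack before the driver NULLs the field and bails out. A standalone userspace model of that idiom:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errors live in the topmost page of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *pll = ERR_PTR(-EINVAL);

	if (IS_ERR(pll))
		printf("register failed: %ld\n", PTR_ERR(pll));
	return 0;
}
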
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index d591567173c4..bd81db6a7829 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
21 21
22Copyright (C) 2013 by the following authors: 22Copyright (C) 2013 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 416a26e1e58d..122208e8a2ee 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
21 21
22Copyright (C) 2013 by the following authors: 22Copyright (C) 2013 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 0bb4faa17523..733646c0d3f8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -147,7 +147,7 @@ static void mdp4_destroy(struct msm_kms *kms)
147 if (mdp4_kms->blank_cursor_iova) 147 if (mdp4_kms->blank_cursor_iova)
148 msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); 148 msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
149 if (mdp4_kms->blank_cursor_bo) 149 if (mdp4_kms->blank_cursor_bo)
150 drm_gem_object_unreference(mdp4_kms->blank_cursor_bo); 150 drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
151 kfree(mdp4_kms); 151 kfree(mdp4_kms);
152} 152}
153 153
@@ -176,6 +176,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
176 if (mdp4_kms->pclk) 176 if (mdp4_kms->pclk)
177 clk_disable_unprepare(mdp4_kms->pclk); 177 clk_disable_unprepare(mdp4_kms->pclk);
178 clk_disable_unprepare(mdp4_kms->lut_clk); 178 clk_disable_unprepare(mdp4_kms->lut_clk);
179 if (mdp4_kms->axi_clk)
180 clk_disable_unprepare(mdp4_kms->axi_clk);
179 181
180 return 0; 182 return 0;
181} 183}
@@ -188,6 +190,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
188 if (mdp4_kms->pclk) 190 if (mdp4_kms->pclk)
189 clk_prepare_enable(mdp4_kms->pclk); 191 clk_prepare_enable(mdp4_kms->pclk);
190 clk_prepare_enable(mdp4_kms->lut_clk); 192 clk_prepare_enable(mdp4_kms->lut_clk);
193 if (mdp4_kms->axi_clk)
194 clk_prepare_enable(mdp4_kms->axi_clk);
191 195
192 return 0; 196 return 0;
193} 197}
@@ -294,15 +298,17 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
294 goto fail; 298 goto fail;
295 } 299 }
296 300
297 mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda"); 301 mdp4_kms->dsi_pll_vdda =
302 devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda");
298 if (IS_ERR(mdp4_kms->dsi_pll_vdda)) 303 if (IS_ERR(mdp4_kms->dsi_pll_vdda))
299 mdp4_kms->dsi_pll_vdda = NULL; 304 mdp4_kms->dsi_pll_vdda = NULL;
300 305
301 mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio"); 306 mdp4_kms->dsi_pll_vddio =
307 devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio");
302 if (IS_ERR(mdp4_kms->dsi_pll_vddio)) 308 if (IS_ERR(mdp4_kms->dsi_pll_vddio))
303 mdp4_kms->dsi_pll_vddio = NULL; 309 mdp4_kms->dsi_pll_vddio = NULL;
304 310
305 mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); 311 mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
306 if (IS_ERR(mdp4_kms->vdd)) 312 if (IS_ERR(mdp4_kms->vdd))
307 mdp4_kms->vdd = NULL; 313 mdp4_kms->vdd = NULL;
308 314
@@ -333,6 +339,13 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
333 goto fail; 339 goto fail;
334 } 340 }
335 341
342 mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
343 if (IS_ERR(mdp4_kms->axi_clk)) {
344 dev_err(dev->dev, "failed to get axi_clk\n");
345 ret = PTR_ERR(mdp4_kms->axi_clk);
346 goto fail;
347 }
348
336 clk_set_rate(mdp4_kms->clk, config->max_clk); 349 clk_set_rate(mdp4_kms->clk, config->max_clk);
337 clk_set_rate(mdp4_kms->lut_clk, config->max_clk); 350 clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
338 351
@@ -348,7 +361,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
348 mdelay(16); 361 mdelay(16);
349 362
350 if (config->iommu) { 363 if (config->iommu) {
351 mmu = msm_iommu_new(dev, config->iommu); 364 mmu = msm_iommu_new(&pdev->dev, config->iommu);
352 if (IS_ERR(mmu)) { 365 if (IS_ERR(mmu)) {
353 ret = PTR_ERR(mmu); 366 ret = PTR_ERR(mmu);
354 goto fail; 367 goto fail;
@@ -406,6 +419,8 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
406 static struct mdp4_platform_config config = {}; 419 static struct mdp4_platform_config config = {};
407#ifdef CONFIG_OF 420#ifdef CONFIG_OF
408 /* TODO */ 421 /* TODO */
422 config.max_clk = 266667000;
423 config.iommu = iommu_domain_alloc(&platform_bus_type);
409#else 424#else
410 if (cpu_is_apq8064()) 425 if (cpu_is_apq8064())
411 config.max_clk = 266667000; 426 config.max_clk = 266667000;
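
The mdp4_kms_init() changes above mix the two resource-acquisition patterns this driver uses: truly optional supplies (devm_regulator_get_optional(), NULLed on absence and guarded at every use) versus required clocks (a devm_clk_get() failure aborts the probe). A condensed sketch of both, with a hypothetical mdp4_kms_sketch struct standing in for struct mdp4_kms:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

struct mdp4_kms_sketch {
	struct regulator *dsi_pll_vdda;
	struct clk *axi_clk;
};

static int mdp4_sketch_probe(struct device *dev, struct mdp4_kms_sketch *kms)
{
	/* optional supply: absence is tolerated and the pointer NULLed */
	kms->dsi_pll_vdda = devm_regulator_get_optional(dev, "dsi_pll_vdda");
	if (IS_ERR(kms->dsi_pll_vdda))
		kms->dsi_pll_vdda = NULL;

	/* required clock: failure aborts the probe, as in the driver */
	kms->axi_clk = devm_clk_get(dev, "mdp_axi_clk");
	if (IS_ERR(kms->axi_clk))
		return PTR_ERR(kms->axi_clk);

	return 0;
}

static int mdp4_sketch_enable(struct mdp4_kms_sketch *kms)
{
	int ret;

	if (kms->dsi_pll_vdda) {
		ret = regulator_enable(kms->dsi_pll_vdda);
		if (ret)
			return ret;
	}
	/* guarded at the call site, like mdp4_enable() above */
	if (kms->axi_clk) {
		ret = clk_prepare_enable(kms->axi_clk);
		if (ret)
			return ret;
	}
	return 0;
}
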
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 715520c54cde..3225da804c61 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -42,6 +42,7 @@ struct mdp4_kms {
42 struct clk *clk; 42 struct clk *clk;
43 struct clk *pclk; 43 struct clk *pclk;
44 struct clk *lut_clk; 44 struct clk *lut_clk;
45 struct clk *axi_clk;
45 46
46 struct mdp_irq error_handler; 47 struct mdp_irq error_handler;
47 48
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 0aa51517f826..67f4f896ba8c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are:
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
21 21
22Copyright (C) 2013 by the following authors: 22Copyright (C) 2013-2014 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
24 24
25Permission is hereby granted, free of charge, to any person obtaining 25Permission is hereby granted, free of charge, to any person obtaining
@@ -68,6 +68,8 @@ enum mdp5_pipe {
68 SSPP_RGB2 = 5, 68 SSPP_RGB2 = 5,
69 SSPP_DMA0 = 6, 69 SSPP_DMA0 = 6,
70 SSPP_DMA1 = 7, 70 SSPP_DMA1 = 7,
71 SSPP_VIG3 = 8,
72 SSPP_RGB3 = 9,
71}; 73};
72 74
73enum mdp5_ctl_mode { 75enum mdp5_ctl_mode {
@@ -126,7 +128,11 @@ enum mdp5_client_id {
126 CID_RGB0 = 16, 128 CID_RGB0 = 16,
127 CID_RGB1 = 17, 129 CID_RGB1 = 17,
128 CID_RGB2 = 18, 130 CID_RGB2 = 18,
129 CID_MAX = 19, 131 CID_VIG3_Y = 19,
132 CID_VIG3_CR = 20,
133 CID_VIG3_CB = 21,
134 CID_RGB3 = 22,
135 CID_MAX = 23,
130}; 136};
131 137
132enum mdp5_igc_type { 138enum mdp5_igc_type {
@@ -299,11 +305,34 @@ static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
299#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 305#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
300#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 306#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
301 307
302static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; } 308static inline uint32_t __offset_CTL(uint32_t idx)
309{
310 switch (idx) {
311 case 0: return (mdp5_cfg->ctl.base[0]);
312 case 1: return (mdp5_cfg->ctl.base[1]);
313 case 2: return (mdp5_cfg->ctl.base[2]);
314 case 3: return (mdp5_cfg->ctl.base[3]);
315 case 4: return (mdp5_cfg->ctl.base[4]);
316 default: return INVALID_IDX(idx);
317 }
318}
319static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); }
303 320
304static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; } 321static inline uint32_t __offset_LAYER(uint32_t idx)
322{
323 switch (idx) {
324 case 0: return 0x00000000;
325 case 1: return 0x00000004;
326 case 2: return 0x00000008;
327 case 3: return 0x0000000c;
328 case 4: return 0x00000010;
329 case 5: return 0x00000024;
330 default: return INVALID_IDX(idx);
331 }
332}
333static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
305 334
306static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; } 335static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
307#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 336#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
308#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 337#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
309static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val) 338static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
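
The hunk above is the heart of this header regeneration: fixed-stride offset arithmetic (0x00000600 + 0x100*i0) is replaced by a lookup through the runtime-selected mdp5_cfg table, so the same accessors work on SoCs whose CTL blocks sit at different base offsets. A standalone sketch of the before/after pattern (the struct layout and base values are illustrative, not the real mdp5_cfg):

#include <stdint.h>
#include <stdio.h>

struct mdp5_sub_block { uint32_t base[8]; };
struct mdp5_config    { struct mdp5_sub_block ctl; };

/* hypothetical per-SoC table, selected once at probe time */
static const struct mdp5_config cfg_v1_0 = {
	.ctl = { .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 } },
};
static const struct mdp5_config *mdp5_cfg = &cfg_v1_0;

/* old scheme: offset derived purely from the index */
static inline uint32_t reg_ctl_fixed(uint32_t i)  { return 0x00000600 + 0x100 * i; }

/* new scheme: offset read from the active config */
static inline uint32_t reg_ctl_lookup(uint32_t i) { return mdp5_cfg->ctl.base[i]; }

int main(void)
{
	uint32_t i;

	for (i = 0; i < 5; i++)
		printf("CTL%u: fixed=0x%05x lookup=0x%05x\n",
		       i, reg_ctl_fixed(i), reg_ctl_lookup(i));
	return 0;
}
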
@@ -354,8 +383,20 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
354} 383}
355#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000 384#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
356#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 385#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
386#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
387#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
388static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
389{
390 return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
391}
392#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
393#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
394static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
395{
396 return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
397}
357 398
358static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; } 399static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); }
359#define MDP5_CTL_OP_MODE__MASK 0x0000000f 400#define MDP5_CTL_OP_MODE__MASK 0x0000000f
360#define MDP5_CTL_OP_MODE__SHIFT 0 401#define MDP5_CTL_OP_MODE__SHIFT 0
361static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val) 402static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
@@ -377,7 +418,7 @@ static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
377 return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK; 418 return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
378} 419}
379 420
380static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; } 421static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __offset_CTL(i0); }
381#define MDP5_CTL_FLUSH_VIG0 0x00000001 422#define MDP5_CTL_FLUSH_VIG0 0x00000001
382#define MDP5_CTL_FLUSH_VIG1 0x00000002 423#define MDP5_CTL_FLUSH_VIG1 0x00000002
383#define MDP5_CTL_FLUSH_VIG2 0x00000004 424#define MDP5_CTL_FLUSH_VIG2 0x00000004
@@ -387,26 +428,48 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x1
387#define MDP5_CTL_FLUSH_LM0 0x00000040 428#define MDP5_CTL_FLUSH_LM0 0x00000040
388#define MDP5_CTL_FLUSH_LM1 0x00000080 429#define MDP5_CTL_FLUSH_LM1 0x00000080
389#define MDP5_CTL_FLUSH_LM2 0x00000100 430#define MDP5_CTL_FLUSH_LM2 0x00000100
431#define MDP5_CTL_FLUSH_LM3 0x00000200
432#define MDP5_CTL_FLUSH_LM4 0x00000400
390#define MDP5_CTL_FLUSH_DMA0 0x00000800 433#define MDP5_CTL_FLUSH_DMA0 0x00000800
391#define MDP5_CTL_FLUSH_DMA1 0x00001000 434#define MDP5_CTL_FLUSH_DMA1 0x00001000
392#define MDP5_CTL_FLUSH_DSPP0 0x00002000 435#define MDP5_CTL_FLUSH_DSPP0 0x00002000
393#define MDP5_CTL_FLUSH_DSPP1 0x00004000 436#define MDP5_CTL_FLUSH_DSPP1 0x00004000
394#define MDP5_CTL_FLUSH_DSPP2 0x00008000 437#define MDP5_CTL_FLUSH_DSPP2 0x00008000
395#define MDP5_CTL_FLUSH_CTL 0x00020000 438#define MDP5_CTL_FLUSH_CTL 0x00020000
439#define MDP5_CTL_FLUSH_VIG3 0x00040000
440#define MDP5_CTL_FLUSH_RGB3 0x00080000
441#define MDP5_CTL_FLUSH_LM5 0x00100000
442#define MDP5_CTL_FLUSH_DSPP3 0x00200000
396 443
397static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; } 444static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
398 445
399static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; } 446static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
400 447
401static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; } 448static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
449{
450 switch (idx) {
451 case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
452 case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
453 case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
454 case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]);
455 case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]);
456 case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]);
457 case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]);
458 case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
459 case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
460 case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
461 default: return INVALID_IDX(idx);
462 }
463}
464static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
402 465
403static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; } 466static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); }
404 467
405static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; } 468static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); }
406 469
407static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; } 470static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); }
408 471
409static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; } 472static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
410#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 473#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
411#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 474#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
412static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val) 475static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
@@ -420,7 +483,7 @@ static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
420 return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK; 483 return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
421} 484}
422 485
423static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; } 486static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); }
424#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000 487#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
425#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16 488#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
426static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val) 489static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
@@ -434,7 +497,7 @@ static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
434 return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK; 497 return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
435} 498}
436 499
437static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; } 500static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); }
438#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000 501#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
439#define MDP5_PIPE_SRC_XY_Y__SHIFT 16 502#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
440static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val) 503static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
@@ -448,7 +511,7 @@ static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
448 return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK; 511 return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
449} 512}
450 513
451static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; } 514static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); }
452#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000 515#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
453#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16 516#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
454static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val) 517static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
@@ -462,7 +525,7 @@ static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
462 return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK; 525 return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
463} 526}
464 527
465static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; } 528static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); }
466#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000 529#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
467#define MDP5_PIPE_OUT_XY_Y__SHIFT 16 530#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
468static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val) 531static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
@@ -476,15 +539,15 @@ static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
476 return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK; 539 return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
477} 540}
478 541
479static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; } 542static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); }
480 543
481static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; } 544static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); }
482 545
483static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; } 546static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); }
484 547
485static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; } 548static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); }
486 549
487static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; } 550static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); }
488#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff 551#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
489#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0 552#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
490static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val) 553static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
@@ -498,7 +561,7 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
498 return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK; 561 return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
499} 562}
500 563
501static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; } 564static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); }
502#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff 565#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
503#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0 566#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
504static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val) 567static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
@@ -512,9 +575,9 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
512 return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK; 575 return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
513} 576}
514 577
515static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; } 578static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); }
516 579
517static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; } 580static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); }
518#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 581#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
519#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 582#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
520static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) 583static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
@@ -568,7 +631,7 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_ty
568 return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; 631 return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
569} 632}
570 633
571static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; } 634static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); }
572#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff 635#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
573#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 636#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
574static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val) 637static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
@@ -594,7 +657,7 @@ static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
594 return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK; 657 return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
595} 658}
596 659
597static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; } 660static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); }
598#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001 661#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
599#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006 662#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
600#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1 663#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
@@ -610,29 +673,29 @@ static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
610#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000 673#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
611#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000 674#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
612 675
613static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; } 676static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); }
614 677
615static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; } 678static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); }
616 679
617static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; } 680static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); }
618 681
619static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; } 682static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); }
620 683
621static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; } 684static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); }
622 685
623static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; } 686static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); }
624 687
625static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; } 688static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); }
626 689
627static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; } 690static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); }
628 691
629static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; } 692static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); }
630 693
631static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; } 694static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); }
632 695
633static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; } 696static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); }
634 697
635static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; } 698static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); }
636#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff 699#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
637#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0 700#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
638static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val) 701static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
@@ -646,7 +709,7 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
646 return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK; 709 return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
647} 710}
648 711
649static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; } 712static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
650#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001 713#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
651#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002 714#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
652#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300 715#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
@@ -686,23 +749,34 @@ static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_
686 return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK; 749 return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
687} 750}
688 751
689static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; } 752static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
690 753
691static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; } 754static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); }
692 755
693static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; } 756static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); }
694 757
695static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; } 758static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); }
696 759
697static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; } 760static inline uint32_t __offset_LM(uint32_t idx)
761{
762 switch (idx) {
763 case 0: return (mdp5_cfg->lm.base[0]);
764 case 1: return (mdp5_cfg->lm.base[1]);
765 case 2: return (mdp5_cfg->lm.base[2]);
766 case 3: return (mdp5_cfg->lm.base[3]);
767 case 4: return (mdp5_cfg->lm.base[4]);
768 default: return INVALID_IDX(idx);
769 }
770}
771static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
698 772
699static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; } 773static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
700#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002 774#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
701#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 775#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
702#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 776#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
703#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 777#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
704 778
705static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; } 779static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
706#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 780#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
707#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16 781#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
708static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val) 782static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
@@ -716,13 +790,13 @@ static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
716 return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK; 790 return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
717} 791}
718 792
719static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; } 793static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); }
720 794
721static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; } 795static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
722 796
723static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; } 797static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
724 798
725static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; } 799static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
726#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003 800#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
727#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0 801#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
728static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val) 802static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -744,57 +818,67 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
744#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000 818#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
745#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000 819#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
746 820
747static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; } 821static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
748 822
749static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; } 823static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
750 824
751static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; } 825static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
752 826
753static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; } 827static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
754 828
755static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; } 829static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
756 830
757static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; } 831static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
758 832
759static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; } 833static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
760 834
761static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; } 835static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
762 836
763static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; } 837static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
764 838
765static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; } 839static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
766 840
767static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; } 841static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
768 842
769static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; } 843static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); }
770 844
771static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; } 845static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); }
772 846
773static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; } 847static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); }
774 848
775static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; } 849static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); }
776 850
777static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; } 851static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); }
778 852
779static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; } 853static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); }
780 854
781static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; } 855static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); }
782 856
783static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; } 857static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); }
784 858
785static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; } 859static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); }
786 860
787static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; } 861static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); }
788 862
789static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; } 863static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); }
790 864
791static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; } 865static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); }
792 866
793static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; } 867static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); }
794 868
795static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; } 869static inline uint32_t __offset_DSPP(uint32_t idx)
870{
871 switch (idx) {
872 case 0: return (mdp5_cfg->dspp.base[0]);
873 case 1: return (mdp5_cfg->dspp.base[1]);
874 case 2: return (mdp5_cfg->dspp.base[2]);
875 case 3: return (mdp5_cfg->dspp.base[3]);
876 default: return INVALID_IDX(idx);
877 }
878}
879static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
796 880
797static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; } 881static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
798#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001 882#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
799#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e 883#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
800#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1 884#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
@@ -811,29 +895,40 @@ static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
811#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000 895#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
812#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000 896#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
813 897
814static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; } 898static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); }
815 899
816static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; } 900static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); }
817 901
818static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; } 902static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); }
819 903
820static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; } 904static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); }
821 905
822static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; } 906static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); }
823 907
824static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; } 908static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); }
825 909
826static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; } 910static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); }
827 911
828static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; } 912static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
829 913
830static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; } 914static inline uint32_t __offset_INTF(uint32_t idx)
915{
916 switch (idx) {
917 case 0: return (mdp5_cfg->intf.base[0]);
918 case 1: return (mdp5_cfg->intf.base[1]);
919 case 2: return (mdp5_cfg->intf.base[2]);
920 case 3: return (mdp5_cfg->intf.base[3]);
921 case 4: return (mdp5_cfg->intf.base[4]);
922 default: return INVALID_IDX(idx);
923 }
924}
925static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
831 926
832static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; } 927static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
833 928
834static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; } 929static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); }
835 930
836static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; } 931static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); }
837#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff 932#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
838#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0 933#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
839static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val) 934static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
@@ -847,23 +942,23 @@ static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
847 return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK; 942 return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
848} 943}
849 944
850static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; } 945static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); }
851 946
852static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; } 947static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); }
853 948
854static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; } 949static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); }
855 950
856static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; } 951static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); }
857 952
858static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; } 953static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); }
859 954
860static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; } 955static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); }
861 956
862static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; } 957static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); }
863 958
864static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; } 959static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); }
865 960
866static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; } 961static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); }
867#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff 962#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
868#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0 963#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
869static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val) 964static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
@@ -872,7 +967,7 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
872} 967}
873#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000 968#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
874 969
875static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; } 970static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); }
876#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff 971#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
877#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0 972#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
878static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val) 973static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
@@ -880,11 +975,11 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
880 return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK; 975 return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
881} 976}
882 977
883static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; } 978static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); }
884 979
885static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; } 980static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); }
886 981
887static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; } 982static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); }
888#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff 983#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
889#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0 984#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
890static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val) 985static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
@@ -898,7 +993,7 @@ static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
898 return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK; 993 return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
899} 994}
900 995
901static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; } 996static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); }
902#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff 997#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
903#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0 998#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
904static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val) 999static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
@@ -913,124 +1008,132 @@ static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
913} 1008}
914#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000 1009#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
915 1010
916static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; } 1011static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); }
917 1012
918static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; } 1013static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); }
919 1014
920static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; } 1015static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); }
921 1016
922static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; } 1017static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); }
923#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001 1018#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
924#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002 1019#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
925#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004 1020#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
926 1021
927static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; } 1022static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); }
928 1023
929static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; } 1024static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); }
930 1025
931static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; } 1026static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); }
932 1027
933static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; } 1028static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); }
934 1029
935static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; } 1030static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); }
936 1031
937static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; } 1032static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); }
938 1033
939static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; } 1034static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); }
940 1035
941static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; } 1036static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); }
942 1037
943static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; } 1038static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); }
944 1039
945static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; } 1040static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); }
946 1041
947static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; } 1042static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); }
948 1043
949static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; } 1044static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); }
950 1045
951static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; } 1046static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); }
952 1047
953static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; } 1048static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); }
954 1049
955static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; } 1050static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); }
956 1051
957static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; } 1052static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); }
958 1053
959static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; } 1054static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); }
960 1055
961static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; } 1056static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); }
962 1057
963static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; } 1058static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); }
964 1059
965static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; } 1060static inline uint32_t __offset_AD(uint32_t idx)
1061{
1062 switch (idx) {
1063 case 0: return (mdp5_cfg->ad.base[0]);
1064 case 1: return (mdp5_cfg->ad.base[1]);
1065 default: return INVALID_IDX(idx);
1066 }
1067}
1068static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
966 1069
967static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; } 1070static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
968 1071
969static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; } 1072static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); }
970 1073
971static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; } 1074static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); }
972 1075
973static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; } 1076static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); }
974 1077
975static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; } 1078static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); }
976 1079
977static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; } 1080static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); }
978 1081
979static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; } 1082static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); }
980 1083
981static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; } 1084static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); }
982 1085
983static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; } 1086static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); }
984 1087
985static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; } 1088static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); }
986 1089
987static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; } 1090static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); }
988 1091
989static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; } 1092static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); }
990 1093
991static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; } 1094static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); }
992 1095
993static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; } 1096static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); }
994 1097
995static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; } 1098static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); }
996 1099
997static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; } 1100static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); }
998 1101
999static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; } 1102static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); }
1000 1103
1001static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; } 1104static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); }
1002 1105
1003static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; } 1106static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); }
1004 1107
1005static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; } 1108static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); }
1006 1109
1007static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; } 1110static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); }
1008 1111
1009static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; } 1112static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); }
1010 1113
1011static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; } 1114static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); }
1012 1115
1013static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; } 1116static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); }
1014 1117
1015static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; } 1118static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); }
1016 1119
1017static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; } 1120static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); }
1018 1121
1019static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; } 1122static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); }
1020 1123
1021static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; } 1124static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); }
1022 1125
1023static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; } 1126static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); }
1024 1127
1025static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; } 1128static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); }
1026 1129
1027static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; } 1130static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); }
1028 1131
1029static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; } 1132static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); }
1030 1133
1031static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; } 1134static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); }
1032 1135
1033static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; } 1136static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); }
1034 1137
1035 1138
1036#endif /* MDP5_XML */ 1139#endif /* MDP5_XML */
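
This block of changes swaps every fixed-stride INTF/AD helper (base 0x12500 or 0x13100 plus 0x200 per instance) for a lookup through a per-SoC base table, so one generated header can serve chips whose blocks live at different addresses; __offset_AD() above shows the shape, with INVALID_IDX() presumably trapping out-of-range instances. A minimal before/after sketch, with a made-up base table standing in for the real mdp5_cfg contents:

#include <stdint.h>

/* Old scheme: base and stride baked into the generated header. */
static inline uint32_t reg_intf_timing_en_fixed(uint32_t i0)
{
	return 0x00012500 + 0x200 * i0;
}

/* New scheme: the header knows only the register's offset within the
 * block; the block base comes from a table selected at runtime once
 * the hardware revision is known.  This table is illustrative. */
static const uint32_t intf_base[] = { 0x12500, 0x12700, 0x12900, 0x12b00 };

static inline uint32_t reg_intf_timing_en_dynamic(uint32_t i0)
{
	return 0x00000000 + intf_base[i0];
}
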
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 71510ee26e96..31a2c6331a1d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -26,14 +26,98 @@ static const char *iommu_ports[] = {
26 26
27static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev); 27static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
28 28
29static int mdp5_hw_init(struct msm_kms *kms) 29const struct mdp5_config *mdp5_cfg;
30
31static const struct mdp5_config msm8x74_config = {
32 .name = "msm8x74",
33 .ctl = {
34 .count = 5,
35 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
36 },
37 .pipe_vig = {
38 .count = 3,
39 .base = { 0x01200, 0x01600, 0x01a00 },
40 },
41 .pipe_rgb = {
42 .count = 3,
43 .base = { 0x01e00, 0x02200, 0x02600 },
44 },
45 .pipe_dma = {
46 .count = 2,
47 .base = { 0x02a00, 0x02e00 },
48 },
49 .lm = {
50 .count = 5,
51 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
52 },
53 .dspp = {
54 .count = 3,
55 .base = { 0x04600, 0x04a00, 0x04e00 },
56 },
57 .ad = {
58 .count = 2,
59 .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
60 },
61 .intf = {
62 .count = 4,
63 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
64 },
65};
66
67static const struct mdp5_config apq8084_config = {
68 .name = "apq8084",
69 .ctl = {
70 .count = 5,
71 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
72 },
73 .pipe_vig = {
74 .count = 4,
75 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
76 },
77 .pipe_rgb = {
78 .count = 4,
79 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
80 },
81 .pipe_dma = {
82 .count = 2,
83 .base = { 0x03200, 0x03600 },
84 },
85 .lm = {
86 .count = 6,
87 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
88 },
89 .dspp = {
90 .count = 4,
91 .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
92
93 },
94 .ad = {
95 .count = 3,
96 .base = { 0x13500, 0x13700, 0x13900 },
97 },
98 .intf = {
99 .count = 5,
100 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
101 },
102};
103
104struct mdp5_config_entry {
105 int revision;
106 const struct mdp5_config *config;
107};
108
109static const struct mdp5_config_entry mdp5_configs[] = {
110 { .revision = 0, .config = &msm8x74_config },
111 { .revision = 2, .config = &msm8x74_config },
112 { .revision = 3, .config = &apq8084_config },
113};
114
115static int mdp5_select_hw_cfg(struct msm_kms *kms)
30{ 116{
31 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 117 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
32 struct drm_device *dev = mdp5_kms->dev; 118 struct drm_device *dev = mdp5_kms->dev;
33 uint32_t version, major, minor; 119 uint32_t version, major, minor;
34 int ret = 0; 120 int i, ret = 0;
35
36 pm_runtime_get_sync(dev->dev);
37 121
38 mdp5_enable(mdp5_kms); 122 mdp5_enable(mdp5_kms);
39 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION); 123 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
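
mdp5_select_hw_cfg() resolves the minor revision against the mdp5_configs map with a linear scan, which for three entries is simpler than a switch and keeps all revision-to-config knowledge in one table. The lookup reduced to a hedged sketch (forward declaration only; the struct itself is defined in mdp5_kms.h further down):

struct mdp5_config;

struct cfg_entry {
	int revision;
	const struct mdp5_config *config;
};

/* Returns NULL for unknown revisions; the caller turns that into -ENXIO. */
static const struct mdp5_config *find_cfg(const struct cfg_entry *tbl,
					  int n, int minor)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].revision == minor)
			return tbl[i].config;
	return NULL;
}
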
@@ -44,8 +128,8 @@ static int mdp5_hw_init(struct msm_kms *kms)
44 128
45 DBG("found MDP5 version v%d.%d", major, minor); 129 DBG("found MDP5 version v%d.%d", major, minor);
46 130
47 if ((major != 1) || ((minor != 0) && (minor != 2))) { 131 if (major != 1) {
48 dev_err(dev->dev, "unexpected MDP version: v%d.%d\n", 132 dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
49 major, minor); 133 major, minor);
50 ret = -ENXIO; 134 ret = -ENXIO;
51 goto out; 135 goto out;
@@ -53,6 +137,35 @@ static int mdp5_hw_init(struct msm_kms *kms)
53 137
54 mdp5_kms->rev = minor; 138 mdp5_kms->rev = minor;
55 139
 140	/* the hw can only be accessed once the global mdp5_cfg pointer is initialized */
141 for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
142 if (mdp5_configs[i].revision != minor)
143 continue;
144 mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
145 break;
146 }
147 if (unlikely(!mdp5_kms->hw_cfg)) {
148 dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
149 major, minor);
150 ret = -ENXIO;
151 goto out;
152 }
153
154 DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
155
156 return 0;
157out:
158 return ret;
159}
160
161static int mdp5_hw_init(struct msm_kms *kms)
162{
163 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
164 struct drm_device *dev = mdp5_kms->dev;
165 int i;
166
167 pm_runtime_get_sync(dev->dev);
168
56 /* Magic unknown register writes: 169 /* Magic unknown register writes:
57 * 170 *
58 * W VBIF:0x004 00000001 (mdss_mdp.c:839) 171 * W VBIF:0x004 00000001 (mdss_mdp.c:839)
@@ -78,15 +191,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
78 */ 191 */
79 192
80 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); 193 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
81 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
82 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
83 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
84 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
85 194
86out: 195 for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
196 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
197
87 pm_runtime_put_sync(dev->dev); 198 pm_runtime_put_sync(dev->dev);
88 199
89 return ret; 200 return 0;
90} 201}
91 202
92static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, 203static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -161,7 +272,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
161static int modeset_init(struct mdp5_kms *mdp5_kms) 272static int modeset_init(struct mdp5_kms *mdp5_kms)
162{ 273{
163 static const enum mdp5_pipe crtcs[] = { 274 static const enum mdp5_pipe crtcs[] = {
164 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, 275 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
165 }; 276 };
166 struct drm_device *dev = mdp5_kms->dev; 277 struct drm_device *dev = mdp5_kms->dev;
167 struct msm_drm_private *priv = dev->dev_private; 278 struct msm_drm_private *priv = dev->dev_private;
@@ -169,7 +280,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
169 int i, ret; 280 int i, ret;
170 281
171 /* construct CRTCs: */ 282 /* construct CRTCs: */
172 for (i = 0; i < ARRAY_SIZE(crtcs); i++) { 283 for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
173 struct drm_plane *plane; 284 struct drm_plane *plane;
174 struct drm_crtc *crtc; 285 struct drm_crtc *crtc;
175 286
@@ -246,7 +357,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
246 struct mdp5_kms *mdp5_kms; 357 struct mdp5_kms *mdp5_kms;
247 struct msm_kms *kms = NULL; 358 struct msm_kms *kms = NULL;
248 struct msm_mmu *mmu; 359 struct msm_mmu *mmu;
249 int ret; 360 int i, ret;
250 361
251 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); 362 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
252 if (!mdp5_kms) { 363 if (!mdp5_kms) {
@@ -307,20 +418,22 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
307 418
308 ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk); 419 ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
309 420
421 ret = mdp5_select_hw_cfg(kms);
422 if (ret)
423 goto fail;
424
310 /* make sure things are off before attaching iommu (bootloader could 425 /* make sure things are off before attaching iommu (bootloader could
311 * have left things on, in which case we'll start getting faults if 426 * have left things on, in which case we'll start getting faults if
312 * we don't disable): 427 * we don't disable):
313 */ 428 */
314 mdp5_enable(mdp5_kms); 429 mdp5_enable(mdp5_kms);
315 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0); 430 for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
316 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0); 431 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
317 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
318 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
319 mdp5_disable(mdp5_kms); 432 mdp5_disable(mdp5_kms);
320 mdelay(16); 433 mdelay(16);
321 434
322 if (config->iommu) { 435 if (config->iommu) {
323 mmu = msm_iommu_new(dev, config->iommu); 436 mmu = msm_iommu_new(&pdev->dev, config->iommu);
324 if (IS_ERR(mmu)) { 437 if (IS_ERR(mmu)) {
325 ret = PTR_ERR(mmu); 438 ret = PTR_ERR(mmu);
326 dev_err(dev->dev, "failed to init iommu: %d\n", ret); 439 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -368,5 +481,11 @@ static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
368#ifdef CONFIG_OF 481#ifdef CONFIG_OF
369 /* TODO */ 482 /* TODO */
370#endif 483#endif
484 config.iommu = iommu_domain_alloc(&platform_bus_type);
485 /* TODO hard-coded in downstream mdss, but should it be? */
486 config.max_clk = 200000000;
487 /* TODO get from DT: */
488 config.smp_blk_cnt = 22;
489
371 return &config; 490 return &config;
372} 491}
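
mdp5_get_config() now allocates the IOMMU domain itself (with the clock rate and SMP block count still hard-coded and flagged as TODOs for DT). The allocation degrades gracefully: iommu_domain_alloc() returning NULL is not an error, it simply routes the driver onto the non-IOMMU path. A hedged sketch of that probe-or-fallback step:

#include <linux/iommu.h>
#include <linux/platform_device.h>

/* Sketch: a NULL return is a valid outcome and means "no IOMMU here,
 * fall back to the VRAM carveout" -- mirroring msm_gpu_init() below. */
static struct iommu_domain *probe_iommu_domain(void)
{
	if (!iommu_present(&platform_bus_type))
		return NULL;
	return iommu_domain_alloc(&platform_bus_type);
}
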
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 6e981b692d1d..5bf340dd0f00 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,6 +21,24 @@
21#include "msm_drv.h" 21#include "msm_drv.h"
22#include "msm_kms.h" 22#include "msm_kms.h"
23#include "mdp/mdp_kms.h" 23#include "mdp/mdp_kms.h"
24/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
25#define MDP5_MAX_BASES 8
26struct mdp5_sub_block {
27 int count;
28 uint32_t base[MDP5_MAX_BASES];
29};
30struct mdp5_config {
31 char *name;
32 struct mdp5_sub_block ctl;
33 struct mdp5_sub_block pipe_vig;
34 struct mdp5_sub_block pipe_rgb;
35 struct mdp5_sub_block pipe_dma;
36 struct mdp5_sub_block lm;
37 struct mdp5_sub_block dspp;
38 struct mdp5_sub_block ad;
39 struct mdp5_sub_block intf;
40};
41extern const struct mdp5_config *mdp5_cfg;
24#include "mdp5.xml.h" 42#include "mdp5.xml.h"
25#include "mdp5_smp.h" 43#include "mdp5_smp.h"
26 44
@@ -30,6 +48,7 @@ struct mdp5_kms {
30 struct drm_device *dev; 48 struct drm_device *dev;
31 49
32 int rev; 50 int rev;
51 const struct mdp5_config *hw_cfg;
33 52
34 /* mapper-id used to request GEM buffer mapped for scanout: */ 53 /* mapper-id used to request GEM buffer mapped for scanout: */
35 int id; 54 int id;
@@ -82,6 +101,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
82 NAME(VIG0), NAME(VIG1), NAME(VIG2), 101 NAME(VIG0), NAME(VIG1), NAME(VIG2),
83 NAME(RGB0), NAME(RGB1), NAME(RGB2), 102 NAME(RGB0), NAME(RGB1), NAME(RGB2),
84 NAME(DMA0), NAME(DMA1), 103 NAME(DMA0), NAME(DMA1),
104 NAME(VIG3), NAME(RGB3),
85#undef NAME 105#undef NAME
86 }; 106 };
87 return names[pipe]; 107 return names[pipe];
@@ -98,6 +118,8 @@ static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
98 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; 118 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
99 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; 119 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
100 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; 120 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
121 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
122 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
101 default: return 0; 123 default: return 0;
102 } 124 }
103} 125}
@@ -108,6 +130,7 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
108 case SSPP_RGB0: 130 case SSPP_RGB0:
109 case SSPP_RGB1: 131 case SSPP_RGB1:
110 case SSPP_RGB2: 132 case SSPP_RGB2:
133 case SSPP_RGB3:
111 return 1; 134 return 1;
112 default: 135 default:
113 return 3; 136 return 3;
@@ -126,6 +149,8 @@ static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
126 case SSPP_RGB2: return CID_RGB2; 149 case SSPP_RGB2: return CID_RGB2;
127 case SSPP_DMA0: return CID_DMA0_Y + plane; 150 case SSPP_DMA0: return CID_DMA0_Y + plane;
128 case SSPP_DMA1: return CID_DMA1_Y + plane; 151 case SSPP_DMA1: return CID_DMA1_Y + plane;
152 case SSPP_VIG3: return CID_VIG3_Y + plane;
153 case SSPP_RGB3: return CID_RGB3;
129 default: return CID_UNUSED; 154 default: return CID_UNUSED;
130 } 155 }
131} 156}
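
pipe2name() indexes names[] by the raw enum value, so appending NAME(VIG3), NAME(RGB3) at the end is only correct because enum mdp5_pipe adds the new pipes after DMA1. A designated-initializer variant would keep the table immune to enum reordering; a sketch, assuming the SSPP_* enumerators from mdp5.xml.h:

static const char *pipe_names[] = {
#define NAME(n) [SSPP_ ## n] = #n
	NAME(VIG0), NAME(VIG1), NAME(VIG2), NAME(VIG3),
	NAME(RGB0), NAME(RGB1), NAME(RGB2), NAME(RGB3),
	NAME(DMA0), NAME(DMA1),
#undef NAME
};
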
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index a9629b85b983..64c1afd6030a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
21 21
22Copyright (C) 2013 by the following authors: 22Copyright (C) 2013 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9a5d87db5c23..b447c01ad89c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -181,7 +181,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
181 struct msm_kms *kms; 181 struct msm_kms *kms;
182 int ret; 182 int ret;
183 183
184
185 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 184 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
186 if (!priv) { 185 if (!priv) {
187 dev_err(dev->dev, "failed to allocate private data\n"); 186 dev_err(dev->dev, "failed to allocate private data\n");
@@ -314,13 +313,15 @@ fail:
314 313
315static void load_gpu(struct drm_device *dev) 314static void load_gpu(struct drm_device *dev)
316{ 315{
316 static DEFINE_MUTEX(init_lock);
317 struct msm_drm_private *priv = dev->dev_private; 317 struct msm_drm_private *priv = dev->dev_private;
318 struct msm_gpu *gpu; 318 struct msm_gpu *gpu;
319 319
320 mutex_lock(&init_lock);
321
320 if (priv->gpu) 322 if (priv->gpu)
321 return; 323 goto out;
322 324
323 mutex_lock(&dev->struct_mutex);
324 gpu = a3xx_gpu_init(dev); 325 gpu = a3xx_gpu_init(dev);
325 if (IS_ERR(gpu)) { 326 if (IS_ERR(gpu)) {
326 dev_warn(dev->dev, "failed to load a3xx gpu\n"); 327 dev_warn(dev->dev, "failed to load a3xx gpu\n");
@@ -330,7 +331,9 @@ static void load_gpu(struct drm_device *dev)
330 331
331 if (gpu) { 332 if (gpu) {
332 int ret; 333 int ret;
334 mutex_lock(&dev->struct_mutex);
333 gpu->funcs->pm_resume(gpu); 335 gpu->funcs->pm_resume(gpu);
336 mutex_unlock(&dev->struct_mutex);
334 ret = gpu->funcs->hw_init(gpu); 337 ret = gpu->funcs->hw_init(gpu);
335 if (ret) { 338 if (ret) {
336 dev_err(dev->dev, "gpu hw init failed: %d\n", ret); 339 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -340,12 +343,12 @@ static void load_gpu(struct drm_device *dev)
340 /* give inactive pm a chance to kick in: */ 343 /* give inactive pm a chance to kick in: */
341 msm_gpu_retire(gpu); 344 msm_gpu_retire(gpu);
342 } 345 }
343
344 } 346 }
345 347
346 priv->gpu = gpu; 348 priv->gpu = gpu;
347 349
348 mutex_unlock(&dev->struct_mutex); 350out:
351 mutex_unlock(&init_lock);
349} 352}
350 353
351static int msm_open(struct drm_device *dev, struct drm_file *file) 354static int msm_open(struct drm_device *dev, struct drm_file *file)
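
The load_gpu() rework is a classic once-only init: a function-local DEFINE_MUTEX serializes concurrent opens, the priv->gpu check is redone under that lock, and dev->struct_mutex is now held only around the pm_resume call that actually needs it rather than across the whole (potentially slow) bring-up. The pattern boiled down to a hedged sketch with hypothetical names:

#include <linux/mutex.h>

struct foo;
static struct foo *foo_create(void);	/* hypothetical constructor */
static struct foo *foo_instance;

static void load_foo_once(void)
{
	static DEFINE_MUTEX(init_lock);

	mutex_lock(&init_lock);
	if (!foo_instance)		/* check again under the lock */
		foo_instance = foo_create();
	mutex_unlock(&init_lock);
}
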
@@ -906,25 +909,22 @@ static int compare_of(struct device *dev, void *data)
906 return dev->of_node == data; 909 return dev->of_node == data;
907} 910}
908 911
909static int msm_drm_add_components(struct device *master, struct master *m) 912static int add_components(struct device *dev, struct component_match **matchptr,
913 const char *name)
910{ 914{
911 struct device_node *np = master->of_node; 915 struct device_node *np = dev->of_node;
912 unsigned i; 916 unsigned i;
913 int ret;
914 917
915 for (i = 0; ; i++) { 918 for (i = 0; ; i++) {
916 struct device_node *node; 919 struct device_node *node;
917 920
918 node = of_parse_phandle(np, "connectors", i); 921 node = of_parse_phandle(np, name, i);
919 if (!node) 922 if (!node)
920 break; 923 break;
921 924
922 ret = component_master_add_child(m, compare_of, node); 925 component_match_add(dev, matchptr, compare_of, node);
923 of_node_put(node);
924
925 if (ret)
926 return ret;
927 } 926 }
927
928 return 0; 928 return 0;
929} 929}
930#else 930#else
@@ -932,9 +932,34 @@ static int compare_dev(struct device *dev, void *data)
932{ 932{
933 return dev == data; 933 return dev == data;
934} 934}
935#endif
936
937static int msm_drm_bind(struct device *dev)
938{
939 return drm_platform_init(&msm_driver, to_platform_device(dev));
940}
941
942static void msm_drm_unbind(struct device *dev)
943{
944 drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
945}
946
947static const struct component_master_ops msm_drm_ops = {
948 .bind = msm_drm_bind,
949 .unbind = msm_drm_unbind,
950};
951
952/*
953 * Platform driver:
954 */
935 955
936static int msm_drm_add_components(struct device *master, struct master *m) 956static int msm_pdev_probe(struct platform_device *pdev)
937{ 957{
958 struct component_match *match = NULL;
959#ifdef CONFIG_OF
960 add_components(&pdev->dev, &match, "connectors");
961 add_components(&pdev->dev, &match, "gpus");
962#else
938 /* For non-DT case, it kinda sucks. We don't actually have a way 963 /* For non-DT case, it kinda sucks. We don't actually have a way
939 * to know whether or not we are waiting for certain devices (or if 964 * to know whether or not we are waiting for certain devices (or if
940 * they are simply not present). But for non-DT we only need to 965 * they are simply not present). But for non-DT we only need to
@@ -958,41 +983,12 @@ static int msm_drm_add_components(struct device *master, struct master *m)
958 return -EPROBE_DEFER; 983 return -EPROBE_DEFER;
959 } 984 }
960 985
961 ret = component_master_add_child(m, compare_dev, dev); 986 component_match_add(&pdev->dev, &match, compare_dev, dev);
962 if (ret) {
963 DBG("could not add child: %d", ret);
964 return ret;
965 }
966 } 987 }
967
968 return 0;
969}
970#endif 988#endif
971 989
972static int msm_drm_bind(struct device *dev)
973{
974 return drm_platform_init(&msm_driver, to_platform_device(dev));
975}
976
977static void msm_drm_unbind(struct device *dev)
978{
979 drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
980}
981
982static const struct component_master_ops msm_drm_ops = {
983 .add_components = msm_drm_add_components,
984 .bind = msm_drm_bind,
985 .unbind = msm_drm_unbind,
986};
987
988/*
989 * Platform driver:
990 */
991
992static int msm_pdev_probe(struct platform_device *pdev)
993{
994 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 990 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
995 return component_master_add(&pdev->dev, &msm_drm_ops); 991 return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
996} 992}
997 993
998static int msm_pdev_remove(struct platform_device *pdev) 994static int msm_pdev_remove(struct platform_device *pdev)
@@ -1008,7 +1004,8 @@ static const struct platform_device_id msm_id[] = {
1008}; 1004};
1009 1005
1010static const struct of_device_id dt_match[] = { 1006static const struct of_device_id dt_match[] = {
1011 { .compatible = "qcom,mdss_mdp" }, 1007 { .compatible = "qcom,mdp" }, /* mdp4 */
1008 { .compatible = "qcom,mdss_mdp" }, /* mdp5 */
1012 {} 1009 {}
1013}; 1010};
1014MODULE_DEVICE_TABLE(of, dt_match); 1011MODULE_DEVICE_TABLE(of, dt_match);
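
This is the component-framework API shift mentioned in the pull text: instead of the master's add_components() callback walking children, the probe builds a match list up front with component_match_add(), and component_master_add_with_match() defers binding until every matched child has probed. Note the of_node_put() also disappears, presumably because the match entry keeps the node pointer. A condensed sketch of the new shape (ops callbacks stubbed, phandle name as in the patch):

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int example_bind(struct device *dev) { return 0; }
static void example_unbind(struct device *dev) { }

static const struct component_master_ops example_ops = {
	.bind	= example_bind,
	.unbind	= example_unbind,
};

static int example_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *np;
	int i;

	for (i = 0; (np = of_parse_phandle(pdev->dev.of_node,
					   "connectors", i)); i++)
		component_match_add(&pdev->dev, &match, compare_of, np);

	return component_master_add_with_match(&pdev->dev,
					       &example_ops, match);
}
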
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 5107fc4826bc..9c5221ce391a 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -19,6 +19,11 @@
19 19
20#include "drm_crtc.h" 20#include "drm_crtc.h"
21#include "drm_fb_helper.h" 21#include "drm_fb_helper.h"
22#include "msm_gem.h"
23
24extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
25 struct vm_area_struct *vma);
26static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
22 27
23/* 28/*
24 * fbdev funcs, to implement legacy fbdev interface on top of drm driver 29 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
@@ -43,6 +48,7 @@ static struct fb_ops msm_fb_ops = {
43 .fb_fillrect = sys_fillrect, 48 .fb_fillrect = sys_fillrect,
44 .fb_copyarea = sys_copyarea, 49 .fb_copyarea = sys_copyarea,
45 .fb_imageblit = sys_imageblit, 50 .fb_imageblit = sys_imageblit,
51 .fb_mmap = msm_fbdev_mmap,
46 52
47 .fb_check_var = drm_fb_helper_check_var, 53 .fb_check_var = drm_fb_helper_check_var,
48 .fb_set_par = drm_fb_helper_set_par, 54 .fb_set_par = drm_fb_helper_set_par,
@@ -51,6 +57,31 @@ static struct fb_ops msm_fb_ops = {
51 .fb_setcmap = drm_fb_helper_setcmap, 57 .fb_setcmap = drm_fb_helper_setcmap,
52}; 58};
53 59
60static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
61{
62 struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
63 struct msm_fbdev *fbdev = to_msm_fbdev(helper);
64 struct drm_gem_object *drm_obj = fbdev->bo;
65 struct drm_device *dev = helper->dev;
66 int ret = 0;
67
68 if (drm_device_is_unplugged(dev))
69 return -ENODEV;
70
71 mutex_lock(&dev->struct_mutex);
72
73 ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
74
75 mutex_unlock(&dev->struct_mutex);
76
77 if (ret) {
78 pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
79 return ret;
80 }
81
82 return msm_gem_mmap_obj(drm_obj, vma);
83}
84
54static int msm_fbdev_create(struct drm_fb_helper *helper, 85static int msm_fbdev_create(struct drm_fb_helper *helper,
55 struct drm_fb_helper_surface_size *sizes) 86 struct drm_fb_helper_surface_size *sizes)
56{ 87{
@@ -104,8 +135,16 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
104 135
105 mutex_lock(&dev->struct_mutex); 136 mutex_lock(&dev->struct_mutex);
106 137
107 /* TODO implement our own fb_mmap so we don't need this: */ 138 /*
108 msm_gem_get_iova_locked(fbdev->bo, 0, &paddr); 139 * NOTE: if we can be guaranteed to be able to map buffer
140 * in panic (ie. lock-safe, etc) we could avoid pinning the
141 * buffer now:
142 */
143 ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
144 if (ret) {
145 dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
146 goto fail;
147 }
109 148
110 fbi = framebuffer_alloc(0, dev->dev); 149 fbi = framebuffer_alloc(0, dev->dev);
111 if (!fbi) { 150 if (!fbi) {
@@ -177,7 +216,7 @@ static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
177 DBG("fbdev: get gamma"); 216 DBG("fbdev: get gamma");
178} 217}
179 218
180static struct drm_fb_helper_funcs msm_fb_helper_funcs = { 219static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
181 .gamma_set = msm_crtc_fb_gamma_set, 220 .gamma_set = msm_crtc_fb_gamma_set,
182 .gamma_get = msm_crtc_fb_gamma_get, 221 .gamma_get = msm_crtc_fb_gamma_get,
183 .fb_probe = msm_fbdev_create, 222 .fb_probe = msm_fbdev_create,
@@ -189,7 +228,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
189 struct msm_drm_private *priv = dev->dev_private; 228 struct msm_drm_private *priv = dev->dev_private;
190 struct msm_fbdev *fbdev = NULL; 229 struct msm_fbdev *fbdev = NULL;
191 struct drm_fb_helper *helper; 230 struct drm_fb_helper *helper;
192 int ret = 0; 231 int ret;
193 232
194 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); 233 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
195 if (!fbdev) 234 if (!fbdev)
@@ -197,7 +236,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
197 236
198 helper = &fbdev->base; 237 helper = &fbdev->base;
199 238
200 helper->funcs = &msm_fb_helper_funcs; 239 drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
201 240
202 ret = drm_fb_helper_init(dev, helper, 241 ret = drm_fb_helper_init(dev, helper,
203 priv->num_crtcs, priv->num_connectors); 242 priv->num_crtcs, priv->num_connectors);
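
Two small API points in this file: the custom msm_fbdev_mmap() is wired in through fb_ops.fb_mmap while everything else stays on the drm_fb_helper defaults, and drm_fb_helper_prepare() replaces the bare helper->funcs assignment (the same swap lands in nouveau below). A sketch of the fb_ops wiring under those assumptions:

#include <linux/fb.h>
#include <linux/module.h>
#include <drm/drm_fb_helper.h>

static int example_fb_mmap(struct fb_info *info, struct vm_area_struct *vma);

/* Sketch: only .fb_mmap is driver-specific; the var/par/blank hooks
 * come from the generic fb helper. */
static struct fb_ops example_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_mmap	= example_fb_mmap,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
};
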
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 690d7e7b6d1e..4b1b82adabde 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
73 int npages = obj->size >> PAGE_SHIFT; 73 int npages = obj->size >> PAGE_SHIFT;
74 74
75 if (iommu_present(&platform_bus_type)) 75 if (iommu_present(&platform_bus_type))
76 p = drm_gem_get_pages(obj, 0); 76 p = drm_gem_get_pages(obj);
77 else 77 else
78 p = get_pages_vram(obj, npages); 78 p = get_pages_vram(obj, npages);
79 79
@@ -278,24 +278,23 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
278 uint32_t *iova) 278 uint32_t *iova)
279{ 279{
280 struct msm_gem_object *msm_obj = to_msm_bo(obj); 280 struct msm_gem_object *msm_obj = to_msm_bo(obj);
281 struct drm_device *dev = obj->dev;
282 int ret = 0; 281 int ret = 0;
283 282
284 if (!msm_obj->domain[id].iova) { 283 if (!msm_obj->domain[id].iova) {
285 struct msm_drm_private *priv = obj->dev->dev_private; 284 struct msm_drm_private *priv = obj->dev->dev_private;
286 struct msm_mmu *mmu = priv->mmus[id];
287 struct page **pages = get_pages(obj); 285 struct page **pages = get_pages(obj);
288 286
289 if (!mmu) {
290 dev_err(dev->dev, "null MMU pointer\n");
291 return -EINVAL;
292 }
293
294 if (IS_ERR(pages)) 287 if (IS_ERR(pages))
295 return PTR_ERR(pages); 288 return PTR_ERR(pages);
296 289
297 if (iommu_present(&platform_bus_type)) { 290 if (iommu_present(&platform_bus_type)) {
298 uint32_t offset = (uint32_t)mmap_offset(obj); 291 struct msm_mmu *mmu = priv->mmus[id];
292 uint32_t offset;
293
294 if (WARN_ON(!mmu))
295 return -EINVAL;
296
297 offset = (uint32_t)mmap_offset(obj);
299 ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, 298 ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
300 obj->size, IOMMU_READ | IOMMU_WRITE); 299 obj->size, IOMMU_READ | IOMMU_WRITE);
301 msm_obj->domain[id].iova = offset; 300 msm_obj->domain[id].iova = offset;
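
msm_gem_get_iova_locked() keeps its lazy shape: resolve and cache the iova for a given address-space id on first request, return the cached value afterwards. The fix here is that a missing MMU becomes a WARN_ON (a driver bug, not a device condition) and is only checked on the IOMMU path that actually uses it. The caching skeleton, reduced to a hedged sketch with stand-in names:

#include <stdint.h>

#define NUM_IDS 2

struct gem_obj {
	struct { uint32_t iova; } domain[NUM_IDS];
};

/* map_into_mmu() stands in for the real mmu->funcs->map() call. */
static int map_into_mmu(struct gem_obj *o, int id, uint32_t *iova);

static int get_iova_cached(struct gem_obj *o, int id, uint32_t *iova)
{
	if (!o->domain[id].iova) {
		int ret = map_into_mmu(o, id, &o->domain[id].iova);
		if (ret)
			return ret;
	}
	*iova = o->domain[id].iova;
	return 0;
}
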
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c6322197db8c..4a0dce587745 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -606,14 +606,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
606 iommu = iommu_domain_alloc(&platform_bus_type); 606 iommu = iommu_domain_alloc(&platform_bus_type);
607 if (iommu) { 607 if (iommu) {
608 dev_info(drm->dev, "%s: using IOMMU\n", name); 608 dev_info(drm->dev, "%s: using IOMMU\n", name);
609 gpu->mmu = msm_iommu_new(drm, iommu); 609 gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
610 } else { 610 } else {
611 dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); 611 dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
612 } 612 }
613 gpu->id = msm_register_mmu(drm, gpu->mmu); 613 gpu->id = msm_register_mmu(drm, gpu->mmu);
614 614
615
615 /* Create ringbuffer: */ 616 /* Create ringbuffer: */
617 mutex_lock(&drm->struct_mutex);
616 gpu->rb = msm_ringbuffer_new(gpu, ringsz); 618 gpu->rb = msm_ringbuffer_new(gpu, ringsz);
619 mutex_unlock(&drm->struct_mutex);
617 if (IS_ERR(gpu->rb)) { 620 if (IS_ERR(gpu->rb)) {
618 ret = PTR_ERR(gpu->rb); 621 ret = PTR_ERR(gpu->rb);
619 gpu->rb = NULL; 622 gpu->rb = NULL;
@@ -621,13 +624,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
621 goto fail; 624 goto fail;
622 } 625 }
623 626
624 ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
625 if (ret) {
626 gpu->rb_iova = 0;
627 dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
628 goto fail;
629 }
630
631 bs_init(gpu); 627 bs_init(gpu);
632 628
633 return 0; 629 return 0;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4b2ad9181edf..099af483fdf0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -33,39 +33,14 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
33 33
34static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) 34static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
35{ 35{
36 struct drm_device *dev = mmu->dev;
37 struct msm_iommu *iommu = to_msm_iommu(mmu); 36 struct msm_iommu *iommu = to_msm_iommu(mmu);
38 int i, ret; 37 return iommu_attach_device(iommu->domain, mmu->dev);
39
40 for (i = 0; i < cnt; i++) {
41 struct device *msm_iommu_get_ctx(const char *ctx_name);
42 struct device *ctx = msm_iommu_get_ctx(names[i]);
43 if (IS_ERR_OR_NULL(ctx)) {
44 dev_warn(dev->dev, "couldn't get %s context", names[i]);
45 continue;
46 }
47 ret = iommu_attach_device(iommu->domain, ctx);
48 if (ret) {
49 dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
50 return ret;
51 }
52 }
53
54 return 0;
55} 38}
56 39
57static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt) 40static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
58{ 41{
59 struct msm_iommu *iommu = to_msm_iommu(mmu); 42 struct msm_iommu *iommu = to_msm_iommu(mmu);
60 int i; 43 iommu_detach_device(iommu->domain, mmu->dev);
61
62 for (i = 0; i < cnt; i++) {
63 struct device *msm_iommu_get_ctx(const char *ctx_name);
64 struct device *ctx = msm_iommu_get_ctx(names[i]);
65 if (IS_ERR_OR_NULL(ctx))
66 continue;
67 iommu_detach_device(iommu->domain, ctx);
68 }
69} 44}
70 45
71static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, 46static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
@@ -149,7 +124,7 @@ static const struct msm_mmu_funcs funcs = {
149 .destroy = msm_iommu_destroy, 124 .destroy = msm_iommu_destroy,
150}; 125};
151 126
152struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain) 127struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
153{ 128{
154 struct msm_iommu *iommu; 129 struct msm_iommu *iommu;
155 130
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 21da6d154f71..7cd88d9dc155 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -32,17 +32,17 @@ struct msm_mmu_funcs {
32 32
33struct msm_mmu { 33struct msm_mmu {
34 const struct msm_mmu_funcs *funcs; 34 const struct msm_mmu_funcs *funcs;
35 struct drm_device *dev; 35 struct device *dev;
36}; 36};
37 37
38static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev, 38static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
39 const struct msm_mmu_funcs *funcs) 39 const struct msm_mmu_funcs *funcs)
40{ 40{
41 mmu->dev = dev; 41 mmu->dev = dev;
42 mmu->funcs = funcs; 42 mmu->funcs = funcs;
43} 43}
44 44
45struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain); 45struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
46struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu); 46struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
47 47
48#endif /* __MSM_MMU_H__ */ 48#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b6dc85c614be..ba29a701ca1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -309,7 +309,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
309 struct ttm_buffer_object *bo = &nvbo->bo; 309 struct ttm_buffer_object *bo = &nvbo->bo;
310 int ret; 310 int ret;
311 311
312 ret = ttm_bo_reserve(bo, false, false, false, 0); 312 ret = ttm_bo_reserve(bo, false, false, false, NULL);
313 if (ret) 313 if (ret)
314 goto out; 314 goto out;
315 315
@@ -350,7 +350,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
350 struct ttm_buffer_object *bo = &nvbo->bo; 350 struct ttm_buffer_object *bo = &nvbo->bo;
351 int ret, ref; 351 int ret, ref;
352 352
353 ret = ttm_bo_reserve(bo, false, false, false, 0); 353 ret = ttm_bo_reserve(bo, false, false, false, NULL);
354 if (ret) 354 if (ret)
355 return ret; 355 return ret;
356 356
@@ -385,7 +385,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
385{ 385{
386 int ret; 386 int ret;
387 387
388 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); 388 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
389 if (ret) 389 if (ret)
390 return ret; 390 return ret;
391 391
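
The 0 -> NULL changes here are type hygiene: the final ttm_bo_reserve() argument is a ww_acquire_ctx pointer, not a flags word, so NULL states the intent and keeps sparse quiet. For reference, the signature as I read it in this era's headers (an assumption, not quoted from this diff):

/* include/drm/ttm/ttm_bo_driver.h, circa this merge: the last two
 * parameters control wait/wound locking; passing NULL means "no
 * acquire context". */
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible, bool no_wait,
		   bool use_ticket, struct ww_acquire_ctx *ticket);
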
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1fa222e8f007..dbdc9ad59546 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -63,7 +63,7 @@ find_encoder(struct drm_connector *connector, int type)
63{ 63{
64 struct drm_device *dev = connector->dev; 64 struct drm_device *dev = connector->dev;
65 struct nouveau_encoder *nv_encoder; 65 struct nouveau_encoder *nv_encoder;
66 struct drm_mode_object *obj; 66 struct drm_encoder *enc;
67 int i, id; 67 int i, id;
68 68
69 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 69 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
@@ -71,10 +71,10 @@ find_encoder(struct drm_connector *connector, int type)
71 if (!id) 71 if (!id)
72 break; 72 break;
73 73
74 obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); 74 enc = drm_encoder_find(dev, id);
75 if (!obj) 75 if (!enc)
76 continue; 76 continue;
77 nv_encoder = nouveau_encoder(obj_to_encoder(obj)); 77 nv_encoder = nouveau_encoder(enc);
78 78
79 if (type == DCB_OUTPUT_ANY || 79 if (type == DCB_OUTPUT_ANY ||
80 (nv_encoder->dcb && nv_encoder->dcb->type == type)) 80 (nv_encoder->dcb && nv_encoder->dcb->type == type))
@@ -104,7 +104,7 @@ nouveau_connector_destroy(struct drm_connector *connector)
104 struct nouveau_connector *nv_connector = nouveau_connector(connector); 104 struct nouveau_connector *nv_connector = nouveau_connector(connector);
105 nouveau_event_ref(NULL, &nv_connector->hpd); 105 nouveau_event_ref(NULL, &nv_connector->hpd);
106 kfree(nv_connector->edid); 106 kfree(nv_connector->edid);
107 drm_sysfs_connector_remove(connector); 107 drm_connector_unregister(connector);
108 drm_connector_cleanup(connector); 108 drm_connector_cleanup(connector);
109 if (nv_connector->aux.transfer) 109 if (nv_connector->aux.transfer)
110 drm_dp_aux_unregister(&nv_connector->aux); 110 drm_dp_aux_unregister(&nv_connector->aux);
@@ -119,7 +119,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
119 struct nouveau_drm *drm = nouveau_drm(dev); 119 struct nouveau_drm *drm = nouveau_drm(dev);
120 struct nouveau_gpio *gpio = nouveau_gpio(drm->device); 120 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
121 struct nouveau_encoder *nv_encoder; 121 struct nouveau_encoder *nv_encoder;
122 struct drm_mode_object *obj; 122 struct drm_encoder *encoder;
123 int i, panel = -ENODEV; 123 int i, panel = -ENODEV;
124 124
125 /* eDP panels need powering on by us (if the VBIOS doesn't default it 125 /* eDP panels need powering on by us (if the VBIOS doesn't default it
@@ -139,10 +139,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
139 if (id == 0) 139 if (id == 0)
140 break; 140 break;
141 141
142 obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); 142 encoder = drm_encoder_find(dev, id);
143 if (!obj) 143 if (!encoder)
144 continue; 144 continue;
145 nv_encoder = nouveau_encoder(obj_to_encoder(obj)); 145 nv_encoder = nouveau_encoder(encoder);
146 146
147 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { 147 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
148 int ret = nouveau_dp_detect(nv_encoder); 148 int ret = nouveau_dp_detect(nv_encoder);
@@ -1236,6 +1236,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
1236 1236
1237 INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work); 1237 INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work);
1238 1238
1239 drm_sysfs_connector_add(connector); 1239 drm_connector_register(connector);
1240 return connector; 1240 return connector;
1241} 1241}
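
Two core helpers sweep through the connector code: drm_encoder_find() collapses the drm_mode_object_find()-plus-obj_to_encoder() dance into one typed call, and drm_connector_register()/drm_connector_unregister() replace the old drm_sysfs_connector_add()/remove() names (the same rename hits omapdrm below). The lookup idiom as a sketch:

#include <drm/drm_crtc.h>

static struct drm_encoder *find_enc(struct drm_device *dev, uint32_t id)
{
	/* Old: obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
	 *      enc = obj ? obj_to_encoder(obj) : NULL;
	 * New: one call, already typed; NULL when id names no encoder. */
	return drm_encoder_find(dev, id);
}
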
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 191665ee7f52..758c11cb9a9a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -438,7 +438,7 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info)
438 info->flags |= FBINFO_HWACCEL_DISABLED; 438 info->flags |= FBINFO_HWACCEL_DISABLED;
439} 439}
440 440
441static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { 441static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
442 .gamma_set = nouveau_fbcon_gamma_set, 442 .gamma_set = nouveau_fbcon_gamma_set,
443 .gamma_get = nouveau_fbcon_gamma_get, 443 .gamma_get = nouveau_fbcon_gamma_get,
444 .fb_probe = nouveau_fbcon_create, 444 .fb_probe = nouveau_fbcon_create,
@@ -464,7 +464,8 @@ nouveau_fbcon_init(struct drm_device *dev)
464 464
465 fbcon->dev = dev; 465 fbcon->dev = dev;
466 drm->fbcon = fbcon; 466 drm->fbcon = fbcon;
467 fbcon->helper.funcs = &nouveau_fbcon_helper_funcs; 467
468 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
468 469
469 ret = drm_fb_helper_init(dev, &fbcon->helper, 470 ret = drm_fb_helper_init(dev, &fbcon->helper,
470 dev->mode_config.num_crtc, 4); 471 dev->mode_config.num_crtc, 4);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c90c0dc0afe8..df9d451afdcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -61,7 +61,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
61 if (!cli->base.vm) 61 if (!cli->base.vm)
62 return 0; 62 return 0;
63 63
64 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); 64 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
65 if (ret) 65 if (ret)
66 return ret; 66 return ret;
67 67
@@ -132,7 +132,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
132 if (!cli->base.vm) 132 if (!cli->base.vm)
133 return; 133 return;
134 134
135 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); 135 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
136 if (ret) 136 if (ret)
137 return; 137 return;
138 138
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index ab0228f640a5..7e185c122750 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -76,6 +76,7 @@ static int
76nouveau_vram_manager_new(struct ttm_mem_type_manager *man, 76nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
77 struct ttm_buffer_object *bo, 77 struct ttm_buffer_object *bo,
78 struct ttm_placement *placement, 78 struct ttm_placement *placement,
79 uint32_t flags,
79 struct ttm_mem_reg *mem) 80 struct ttm_mem_reg *mem)
80{ 81{
81 struct nouveau_drm *drm = nouveau_bdev(man->bdev); 82 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
@@ -162,6 +163,7 @@ static int
162nouveau_gart_manager_new(struct ttm_mem_type_manager *man, 163nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
163 struct ttm_buffer_object *bo, 164 struct ttm_buffer_object *bo,
164 struct ttm_placement *placement, 165 struct ttm_placement *placement,
166 uint32_t flags,
165 struct ttm_mem_reg *mem) 167 struct ttm_mem_reg *mem)
166{ 168{
167 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 169 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -242,6 +244,7 @@ static int
242nv04_gart_manager_new(struct ttm_mem_type_manager *man, 244nv04_gart_manager_new(struct ttm_mem_type_manager *man,
243 struct ttm_buffer_object *bo, 245 struct ttm_buffer_object *bo,
244 struct ttm_placement *placement, 246 struct ttm_placement *placement,
247 uint32_t flags,
245 struct ttm_mem_reg *mem) 248 struct ttm_mem_reg *mem)
246{ 249{
247 struct nouveau_mem *node; 250 struct nouveau_mem *node;
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 86f4ead0441d..36bc5cc80816 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -130,7 +130,7 @@ static void omap_connector_destroy(struct drm_connector *connector)
130 struct omap_dss_device *dssdev = omap_connector->dssdev; 130 struct omap_dss_device *dssdev = omap_connector->dssdev;
131 131
132 DBG("%s", omap_connector->dssdev->name); 132 DBG("%s", omap_connector->dssdev->name);
133 drm_sysfs_connector_remove(connector); 133 drm_connector_unregister(connector);
134 drm_connector_cleanup(connector); 134 drm_connector_cleanup(connector);
135 kfree(omap_connector); 135 kfree(omap_connector);
136 136
@@ -307,7 +307,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
307 connector->interlace_allowed = 1; 307 connector->interlace_allowed = 1;
308 connector->doublescan_allowed = 0; 308 connector->doublescan_allowed = 0;
309 309
310 drm_sysfs_connector_add(connector); 310 drm_connector_register(connector);
311 311
312 return connector; 312 return connector;
313 313
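[drm_sysfs_connector_add()/drm_sysfs_connector_remove() are renamed to drm_connector_register()/drm_connector_unregister() throughout this pull (see also the qxl hunks below). The lifecycle pairing, sketched with hypothetical my_* wrappers:

#include <drm/drm_crtc.h>

static int my_connector_publish(struct drm_connector *connector)
{
	/* registration can fail and returns an int worth checking */
	return drm_connector_register(connector);
}

static void my_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	/* the caller frees its container struct afterwards */
}
]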
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f926b4caf449..56c60552abba 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
199static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, 199static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
200 struct page **pages, uint32_t npages, uint32_t roll) 200 struct page **pages, uint32_t npages, uint32_t roll)
201{ 201{
202 dma_addr_t pat_pa = 0; 202 dma_addr_t pat_pa = 0, data_pa = 0;
203 uint32_t *data; 203 uint32_t *data;
204 struct pat *pat; 204 struct pat *pat;
205 struct refill_engine *engine = txn->engine_handle; 205 struct refill_engine *engine = txn->engine_handle;
@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
223 .lut_id = engine->tcm->lut_id, 223 .lut_id = engine->tcm->lut_id,
224 }; 224 };
225 225
226 data = alloc_dma(txn, 4*i, &pat->data_pa); 226 data = alloc_dma(txn, 4*i, &data_pa);
227 /* FIXME: what if data_pa is more than 32-bit ? */
228 pat->data_pa = data_pa;
227 229
228 while (i--) { 230 while (i--) {
229 int n = i + roll; 231 int n = i + roll;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 284b80fc3c54..b08a450d1b5d 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -119,13 +119,6 @@ struct omap_drm_private {
119 struct omap_drm_irq error_handler; 119 struct omap_drm_irq error_handler;
120}; 120};
121 121
122/* this should probably be in drm-core to standardize amongst drivers */
123#define DRM_ROTATE_0 0
124#define DRM_ROTATE_90 1
125#define DRM_ROTATE_180 2
126#define DRM_ROTATE_270 3
127#define DRM_REFLECT_X 4
128#define DRM_REFLECT_Y 5
129 122
130#ifdef CONFIG_DEBUG_FS 123#ifdef CONFIG_DEBUG_FS
131int omap_debugfs_init(struct drm_minor *minor); 124int omap_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 1388ca7f87e8..8436c6857cda 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -281,7 +281,7 @@ fail:
281 return ret; 281 return ret;
282} 282}
283 283
284static struct drm_fb_helper_funcs omap_fb_helper_funcs = { 284static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
285 .fb_probe = omap_fbdev_create, 285 .fb_probe = omap_fbdev_create,
286}; 286};
287 287
@@ -325,7 +325,7 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
325 325
326 helper = &fbdev->base; 326 helper = &fbdev->base;
327 327
328 helper->funcs = &omap_fb_helper_funcs; 328 drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
329 329
330 ret = drm_fb_helper_init(dev, helper, 330 ret = drm_fb_helper_init(dev, helper,
331 priv->num_crtcs, priv->num_connectors); 331 priv->num_crtcs, priv->num_connectors);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 95dbce286a41..e4849413ee80 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -233,11 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
233 233
234 WARN_ON(omap_obj->pages); 234 WARN_ON(omap_obj->pages);
235 235
236 /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the 236 pages = drm_gem_get_pages(obj);
237 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
238 * we actually want CMA memory for it all anyways..
239 */
240 pages = drm_gem_get_pages(obj, GFP_KERNEL);
241 if (IS_ERR(pages)) { 237 if (IS_ERR(pages)) {
242 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); 238 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
243 return PTR_ERR(pages); 239 return PTR_ERR(pages);
@@ -791,7 +787,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
791 omap_obj->paddr = tiler_ssptr(block); 787 omap_obj->paddr = tiler_ssptr(block);
792 omap_obj->block = block; 788 omap_obj->block = block;
793 789
794 DBG("got paddr: %08x", omap_obj->paddr); 790 DBG("got paddr: %pad", &omap_obj->paddr);
795 } 791 }
796 792
797 omap_obj->paddr_cnt++; 793 omap_obj->paddr_cnt++;
@@ -985,9 +981,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
985 981
986 off = drm_vma_node_start(&obj->vma_node); 982 off = drm_vma_node_start(&obj->vma_node);
987 983
988 seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", 984 seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
989 omap_obj->flags, obj->name, obj->refcount.refcount.counter, 985 omap_obj->flags, obj->name, obj->refcount.refcount.counter,
990 off, omap_obj->paddr, omap_obj->paddr_cnt, 986 off, &omap_obj->paddr, omap_obj->paddr_cnt,
991 omap_obj->vaddr, omap_obj->roll); 987 omap_obj->vaddr, omap_obj->roll);
992 988
993 if (omap_obj->flags & OMAP_BO_TILED) { 989 if (omap_obj->flags & OMAP_BO_TILED) {
@@ -1183,9 +1179,7 @@ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
1183 } 1179 }
1184 } 1180 }
1185 spin_unlock(&sync_lock); 1181 spin_unlock(&sync_lock);
1186 1182 kfree(waiter);
1187 if (waiter)
1188 kfree(waiter);
1189 } 1183 }
1190 return ret; 1184 return ret;
1191} 1185}
@@ -1347,6 +1341,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1347 struct omap_drm_private *priv = dev->dev_private; 1341 struct omap_drm_private *priv = dev->dev_private;
1348 struct omap_gem_object *omap_obj; 1342 struct omap_gem_object *omap_obj;
1349 struct drm_gem_object *obj = NULL; 1343 struct drm_gem_object *obj = NULL;
1344 struct address_space *mapping;
1350 size_t size; 1345 size_t size;
1351 int ret; 1346 int ret;
1352 1347
@@ -1404,14 +1399,16 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1404 omap_obj->height = gsize.tiled.height; 1399 omap_obj->height = gsize.tiled.height;
1405 } 1400 }
1406 1401
1407 ret = 0; 1402 if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
1408 if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
1409 drm_gem_private_object_init(dev, obj, size); 1403 drm_gem_private_object_init(dev, obj, size);
1410 else 1404 } else {
1411 ret = drm_gem_object_init(dev, obj, size); 1405 ret = drm_gem_object_init(dev, obj, size);
1406 if (ret)
1407 goto fail;
1412 1408
1413 if (ret) 1409 mapping = file_inode(obj->filp)->i_mapping;
1414 goto fail; 1410 mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1411 }
1415 1412
1416 return obj; 1413 return obj;
1417 1414
@@ -1467,8 +1464,8 @@ void omap_gem_init(struct drm_device *dev)
1467 entry->paddr = tiler_ssptr(block); 1464 entry->paddr = tiler_ssptr(block);
1468 entry->block = block; 1465 entry->block = block;
1469 1466
1470 DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h, 1467 DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
1471 entry->paddr, 1468 &entry->paddr,
1472 usergart[i].stride_pfn << PAGE_SHIFT); 1469 usergart[i].stride_pfn << PAGE_SHIFT);
1473 } 1470 }
1474 } 1471 }
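[Two themes run through the omap_gem hunks: drm_gem_get_pages() no longer takes a gfp mask, so the desired mask is set once on the object's shmem mapping at creation time; and dma_addr_t values are printed with the %pad specifier (which takes a pointer to the value), keeping the format correct when dma_addr_t is 64-bit on a 32-bit build. A combined sketch, with my_gem_setup purely illustrative:

#include <linux/pagemap.h>
#include <drm/drmP.h>

static int my_gem_setup(struct drm_gem_object *obj, dma_addr_t paddr)
{
	struct address_space *mapping = file_inode(obj->filp)->i_mapping;
	struct page **pages;

	/* replaces the gfp argument formerly passed to drm_gem_get_pages() */
	mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	pr_debug("got paddr: %pad\n", &paddr);	/* note: pointer, not value */
	return 0;
}
]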
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 3cf31ee59aac..891a4dc608af 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
142 DBG("%dx%d -> %dx%d (%d)", info->width, info->height, 142 DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
143 info->out_width, info->out_height, 143 info->out_width, info->out_height,
144 info->screen_width); 144 info->screen_width);
145 DBG("%d,%d %08x %08x", info->pos_x, info->pos_y, 145 DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
146 info->paddr, info->p_uv_addr); 146 &info->paddr, &info->p_uv_addr);
147 147
148 /* TODO: */ 148 /* TODO: */
149 ilace = false; 149 ilace = false;
@@ -308,16 +308,13 @@ void omap_plane_install_properties(struct drm_plane *plane,
308 if (priv->has_dmm) { 308 if (priv->has_dmm) {
309 prop = priv->rotation_prop; 309 prop = priv->rotation_prop;
310 if (!prop) { 310 if (!prop) {
311 const struct drm_prop_enum_list props[] = { 311 prop = drm_mode_create_rotation_property(dev,
312 { DRM_ROTATE_0, "rotate-0" }, 312 BIT(DRM_ROTATE_0) |
313 { DRM_ROTATE_90, "rotate-90" }, 313 BIT(DRM_ROTATE_90) |
314 { DRM_ROTATE_180, "rotate-180" }, 314 BIT(DRM_ROTATE_180) |
315 { DRM_ROTATE_270, "rotate-270" }, 315 BIT(DRM_ROTATE_270) |
316 { DRM_REFLECT_X, "reflect-x" }, 316 BIT(DRM_REFLECT_X) |
317 { DRM_REFLECT_Y, "reflect-y" }, 317 BIT(DRM_REFLECT_Y));
318 };
319 prop = drm_property_create_bitmask(dev, 0, "rotation",
320 props, ARRAY_SIZE(props));
321 if (prop == NULL) 318 if (prop == NULL)
322 return; 319 return;
323 priv->rotation_prop = prop; 320 priv->rotation_prop = prop;
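[The driver-local bitmask property is replaced by the core helper added this cycle; the DRM_ROTATE_* and DRM_REFLECT_* bits now live in drm core, which is why the omap_drv.h copies were deleted above. A sketch of the new call, with my_plane_add_rotation hypothetical:

#include <linux/bitops.h>
#include <drm/drm_crtc.h>

static int my_plane_add_rotation(struct drm_device *dev,
				 struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_mode_create_rotation_property(dev,
						 BIT(DRM_ROTATE_0) |
						 BIT(DRM_ROTATE_90) |
						 BIT(DRM_ROTATE_180) |
						 BIT(DRM_ROTATE_270) |
						 BIT(DRM_REFLECT_X) |
						 BIT(DRM_REFLECT_Y));
	if (!prop)
		return -ENOMEM;

	drm_object_attach_property(&plane->base, prop, BIT(DRM_ROTATE_0));
	return 0;
}
]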
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 4ec874da5668..bee9f72b3a93 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -5,7 +5,7 @@ config DRM_PANEL
5 Panel registration and lookup framework. 5 Panel registration and lookup framework.
6 6
7menu "Display Panels" 7menu "Display Panels"
8 depends on DRM_PANEL 8 depends on DRM && DRM_PANEL
9 9
10config DRM_PANEL_SIMPLE 10config DRM_PANEL_SIMPLE
11 tristate "support for simple panels" 11 tristate "support for simple panels"
@@ -18,14 +18,11 @@ config DRM_PANEL_SIMPLE
18 18
19config DRM_PANEL_LD9040 19config DRM_PANEL_LD9040
20 tristate "LD9040 RGB/SPI panel" 20 tristate "LD9040 RGB/SPI panel"
21 depends on DRM && DRM_PANEL 21 depends on OF && SPI
22 depends on OF
23 select SPI
24 select VIDEOMODE_HELPERS 22 select VIDEOMODE_HELPERS
25 23
26config DRM_PANEL_S6E8AA0 24config DRM_PANEL_S6E8AA0
27 tristate "S6E8AA0 DSI video mode panel" 25 tristate "S6E8AA0 DSI video mode panel"
28 depends on DRM && DRM_PANEL
29 depends on OF 26 depends on OF
30 select DRM_MIPI_DSI 27 select DRM_MIPI_DSI
31 select VIDEOMODE_HELPERS 28 select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index db1601fdbe29..42ac67b21e9f 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -110,7 +110,10 @@ struct ld9040 {
110 int error; 110 int error;
111}; 111};
112 112
113#define panel_to_ld9040(p) container_of(p, struct ld9040, panel) 113static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
114{
115 return container_of(panel, struct ld9040, panel);
116}
114 117
115static int ld9040_clear_error(struct ld9040 *ctx) 118static int ld9040_clear_error(struct ld9040 *ctx)
116{ 119{
@@ -216,6 +219,11 @@ static int ld9040_power_off(struct ld9040 *ctx)
216 219
217static int ld9040_disable(struct drm_panel *panel) 220static int ld9040_disable(struct drm_panel *panel)
218{ 221{
222 return 0;
223}
224
225static int ld9040_unprepare(struct drm_panel *panel)
226{
219 struct ld9040 *ctx = panel_to_ld9040(panel); 227 struct ld9040 *ctx = panel_to_ld9040(panel);
220 228
221 msleep(120); 229 msleep(120);
@@ -228,7 +236,7 @@ static int ld9040_disable(struct drm_panel *panel)
228 return ld9040_power_off(ctx); 236 return ld9040_power_off(ctx);
229} 237}
230 238
231static int ld9040_enable(struct drm_panel *panel) 239static int ld9040_prepare(struct drm_panel *panel)
232{ 240{
233 struct ld9040 *ctx = panel_to_ld9040(panel); 241 struct ld9040 *ctx = panel_to_ld9040(panel);
234 int ret; 242 int ret;
@@ -242,11 +250,16 @@ static int ld9040_enable(struct drm_panel *panel)
242 ret = ld9040_clear_error(ctx); 250 ret = ld9040_clear_error(ctx);
243 251
244 if (ret < 0) 252 if (ret < 0)
245 ld9040_disable(panel); 253 ld9040_unprepare(panel);
246 254
247 return ret; 255 return ret;
248} 256}
249 257
258static int ld9040_enable(struct drm_panel *panel)
259{
260 return 0;
261}
262
250static int ld9040_get_modes(struct drm_panel *panel) 263static int ld9040_get_modes(struct drm_panel *panel)
251{ 264{
252 struct drm_connector *connector = panel->connector; 265 struct drm_connector *connector = panel->connector;
@@ -273,6 +286,8 @@ static int ld9040_get_modes(struct drm_panel *panel)
273 286
274static const struct drm_panel_funcs ld9040_drm_funcs = { 287static const struct drm_panel_funcs ld9040_drm_funcs = {
275 .disable = ld9040_disable, 288 .disable = ld9040_disable,
289 .unprepare = ld9040_unprepare,
290 .prepare = ld9040_prepare,
276 .enable = ld9040_enable, 291 .enable = ld9040_enable,
277 .get_modes = ld9040_get_modes, 292 .get_modes = ld9040_get_modes,
278}; 293};
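[The ld9040 conversion splits the old enable/disable pair into the new four-step drm_panel contract: power sequencing moves into .prepare()/.unprepare(), while .enable()/.disable() become (here, empty) brackets around the period when video is flowing. Judging from these patches the expected order is prepare, enable, scanout, disable, unprepare. Hypothetical stubs showing the shape:

#include <drm/drm_panel.h>

static int my_panel_prepare(struct drm_panel *panel)   { return 0; } /* power up   */
static int my_panel_enable(struct drm_panel *panel)    { return 0; } /* first frame */
static int my_panel_disable(struct drm_panel *panel)   { return 0; } /* blank       */
static int my_panel_unprepare(struct drm_panel *panel) { return 0; } /* power down  */

static const struct drm_panel_funcs my_panel_funcs = {
	.disable   = my_panel_disable,
	.unprepare = my_panel_unprepare,
	.prepare   = my_panel_prepare,
	.enable    = my_panel_enable,
};
]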
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index 06e57a26db7a..b5217fe37f02 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -120,7 +120,10 @@ struct s6e8aa0 {
120 int error; 120 int error;
121}; 121};
122 122
123#define panel_to_s6e8aa0(p) container_of(p, struct s6e8aa0, panel) 123static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
124{
125 return container_of(panel, struct s6e8aa0, panel);
126}
124 127
125static int s6e8aa0_clear_error(struct s6e8aa0 *ctx) 128static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
126{ 129{
@@ -133,14 +136,14 @@ static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
133static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len) 136static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
134{ 137{
135 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); 138 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
136 int ret; 139 ssize_t ret;
137 140
138 if (ctx->error < 0) 141 if (ctx->error < 0)
139 return; 142 return;
140 143
141 ret = mipi_dsi_dcs_write(dsi, dsi->channel, data, len); 144 ret = mipi_dsi_dcs_write(dsi, data, len);
142 if (ret < 0) { 145 if (ret < 0) {
143 dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len, 146 dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len,
144 data); 147 data);
145 ctx->error = ret; 148 ctx->error = ret;
146 } 149 }
@@ -154,7 +157,7 @@ static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
154 if (ctx->error < 0) 157 if (ctx->error < 0)
155 return ctx->error; 158 return ctx->error;
156 159
157 ret = mipi_dsi_dcs_read(dsi, dsi->channel, cmd, data, len); 160 ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
158 if (ret < 0) { 161 if (ret < 0) {
159 dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd); 162 dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
160 ctx->error = ret; 163 ctx->error = ret;
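[Two API changes in this hunk pair: mipi_dsi_dcs_write() and mipi_dsi_dcs_read() drop the explicit virtual-channel argument (the mipi_dsi_device carries its own channel), and the write now returns ssize_t, hence the %d -> %zd format fix. A sketch of the updated write wrapper, my_dcs_write being illustrative:

#include <drm/drm_mipi_dsi.h>

static ssize_t my_dcs_write(struct mipi_dsi_device *dsi,
			    const void *data, size_t len)
{
	ssize_t ret = mipi_dsi_dcs_write(dsi, data, len);

	if (ret < 0)
		dev_err(&dsi->dev, "error %zd writing dcs seq: %*ph\n",
			ret, (int)len, data);
	return ret;
}
]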
@@ -889,6 +892,11 @@ static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
889 892
890static int s6e8aa0_disable(struct drm_panel *panel) 893static int s6e8aa0_disable(struct drm_panel *panel)
891{ 894{
895 return 0;
896}
897
898static int s6e8aa0_unprepare(struct drm_panel *panel)
899{
892 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel); 900 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
893 901
894 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE); 902 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
@@ -900,7 +908,7 @@ static int s6e8aa0_disable(struct drm_panel *panel)
900 return s6e8aa0_power_off(ctx); 908 return s6e8aa0_power_off(ctx);
901} 909}
902 910
903static int s6e8aa0_enable(struct drm_panel *panel) 911static int s6e8aa0_prepare(struct drm_panel *panel)
904{ 912{
905 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel); 913 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
906 int ret; 914 int ret;
@@ -913,11 +921,16 @@ static int s6e8aa0_enable(struct drm_panel *panel)
913 ret = ctx->error; 921 ret = ctx->error;
914 922
915 if (ret < 0) 923 if (ret < 0)
916 s6e8aa0_disable(panel); 924 s6e8aa0_unprepare(panel);
917 925
918 return ret; 926 return ret;
919} 927}
920 928
929static int s6e8aa0_enable(struct drm_panel *panel)
930{
931 return 0;
932}
933
921static int s6e8aa0_get_modes(struct drm_panel *panel) 934static int s6e8aa0_get_modes(struct drm_panel *panel)
922{ 935{
923 struct drm_connector *connector = panel->connector; 936 struct drm_connector *connector = panel->connector;
@@ -944,6 +957,8 @@ static int s6e8aa0_get_modes(struct drm_panel *panel)
944 957
945static const struct drm_panel_funcs s6e8aa0_drm_funcs = { 958static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
946 .disable = s6e8aa0_disable, 959 .disable = s6e8aa0_disable,
960 .unprepare = s6e8aa0_unprepare,
961 .prepare = s6e8aa0_prepare,
947 .enable = s6e8aa0_enable, 962 .enable = s6e8aa0_enable,
948 .get_modes = s6e8aa0_get_modes, 963 .get_modes = s6e8aa0_get_modes,
949}; 964};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a25136132c31..4ce1db0a68ff 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -37,14 +37,35 @@ struct panel_desc {
37 const struct drm_display_mode *modes; 37 const struct drm_display_mode *modes;
38 unsigned int num_modes; 38 unsigned int num_modes;
39 39
40 unsigned int bpc;
41
40 struct { 42 struct {
41 unsigned int width; 43 unsigned int width;
42 unsigned int height; 44 unsigned int height;
43 } size; 45 } size;
46
47 /**
48 * @prepare: the time (in milliseconds) that it takes for the panel to
49 * become ready and start receiving video data
50 * @enable: the time (in milliseconds) that it takes for the panel to
51 * display the first valid frame after starting to receive
52 * video data
53 * @disable: the time (in milliseconds) that it takes for the panel to
54 * turn the display off (no content is visible)
55 * @unprepare: the time (in milliseconds) that it takes for the panel
56 * to power itself down completely
57 */
58 struct {
59 unsigned int prepare;
60 unsigned int enable;
61 unsigned int disable;
62 unsigned int unprepare;
63 } delay;
44}; 64};
45 65
46struct panel_simple { 66struct panel_simple {
47 struct drm_panel base; 67 struct drm_panel base;
68 bool prepared;
48 bool enabled; 69 bool enabled;
49 70
50 const struct panel_desc *desc; 71 const struct panel_desc *desc;
@@ -87,6 +108,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
87 num++; 108 num++;
88 } 109 }
89 110
111 connector->display_info.bpc = panel->desc->bpc;
90 connector->display_info.width_mm = panel->desc->size.width; 112 connector->display_info.width_mm = panel->desc->size.width;
91 connector->display_info.height_mm = panel->desc->size.height; 113 connector->display_info.height_mm = panel->desc->size.height;
92 114
@@ -105,21 +127,40 @@ static int panel_simple_disable(struct drm_panel *panel)
105 backlight_update_status(p->backlight); 127 backlight_update_status(p->backlight);
106 } 128 }
107 129
130 if (p->desc->delay.disable)
131 msleep(p->desc->delay.disable);
132
133 p->enabled = false;
134
135 return 0;
136}
137
138static int panel_simple_unprepare(struct drm_panel *panel)
139{
140 struct panel_simple *p = to_panel_simple(panel);
141
142 if (!p->prepared)
143 return 0;
144
108 if (p->enable_gpio) 145 if (p->enable_gpio)
109 gpiod_set_value_cansleep(p->enable_gpio, 0); 146 gpiod_set_value_cansleep(p->enable_gpio, 0);
110 147
111 regulator_disable(p->supply); 148 regulator_disable(p->supply);
112 p->enabled = false; 149
150 if (p->desc->delay.unprepare)
151 msleep(p->desc->delay.unprepare);
152
153 p->prepared = false;
113 154
114 return 0; 155 return 0;
115} 156}
116 157
117static int panel_simple_enable(struct drm_panel *panel) 158static int panel_simple_prepare(struct drm_panel *panel)
118{ 159{
119 struct panel_simple *p = to_panel_simple(panel); 160 struct panel_simple *p = to_panel_simple(panel);
120 int err; 161 int err;
121 162
122 if (p->enabled) 163 if (p->prepared)
123 return 0; 164 return 0;
124 165
125 err = regulator_enable(p->supply); 166 err = regulator_enable(p->supply);
@@ -131,6 +172,24 @@ static int panel_simple_enable(struct drm_panel *panel)
131 if (p->enable_gpio) 172 if (p->enable_gpio)
132 gpiod_set_value_cansleep(p->enable_gpio, 1); 173 gpiod_set_value_cansleep(p->enable_gpio, 1);
133 174
175 if (p->desc->delay.prepare)
176 msleep(p->desc->delay.prepare);
177
178 p->prepared = true;
179
180 return 0;
181}
182
183static int panel_simple_enable(struct drm_panel *panel)
184{
185 struct panel_simple *p = to_panel_simple(panel);
186
187 if (p->enabled)
188 return 0;
189
190 if (p->desc->delay.enable)
191 msleep(p->desc->delay.enable);
192
134 if (p->backlight) { 193 if (p->backlight) {
135 p->backlight->props.power = FB_BLANK_UNBLANK; 194 p->backlight->props.power = FB_BLANK_UNBLANK;
136 backlight_update_status(p->backlight); 195 backlight_update_status(p->backlight);
@@ -164,6 +223,8 @@ static int panel_simple_get_modes(struct drm_panel *panel)
164 223
165static const struct drm_panel_funcs panel_simple_funcs = { 224static const struct drm_panel_funcs panel_simple_funcs = {
166 .disable = panel_simple_disable, 225 .disable = panel_simple_disable,
226 .unprepare = panel_simple_unprepare,
227 .prepare = panel_simple_prepare,
167 .enable = panel_simple_enable, 228 .enable = panel_simple_enable,
168 .get_modes = panel_simple_get_modes, 229 .get_modes = panel_simple_get_modes,
169}; 230};
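[With the bpc field and the documented delay block above, a panel description now carries its power-sequencing times directly, and panel_simple_prepare()/unprepare()/enable()/disable() honour them with msleep(). An illustrative entry (values invented, not from any datasheet; example_mode assumed defined alongside):

static const struct panel_desc example_panel = {
	.modes = &example_mode,
	.num_modes = 1,
	.bpc = 6,			/* 6 bits per colour component */
	.size = {
		.width = 293,		/* mm */
		.height = 165,		/* mm */
	},
	.delay = {
		.prepare = 105,		/* ms until panel accepts video */
		.enable = 20,		/* ms until first valid frame */
		.unprepare = 50,	/* ms to power down completely */
	},
};
]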
@@ -179,22 +240,21 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
179 return -ENOMEM; 240 return -ENOMEM;
180 241
181 panel->enabled = false; 242 panel->enabled = false;
243 panel->prepared = false;
182 panel->desc = desc; 244 panel->desc = desc;
183 245
184 panel->supply = devm_regulator_get(dev, "power"); 246 panel->supply = devm_regulator_get(dev, "power");
185 if (IS_ERR(panel->supply)) 247 if (IS_ERR(panel->supply))
186 return PTR_ERR(panel->supply); 248 return PTR_ERR(panel->supply);
187 249
188 panel->enable_gpio = devm_gpiod_get(dev, "enable"); 250 panel->enable_gpio = devm_gpiod_get_optional(dev, "enable");
189 if (IS_ERR(panel->enable_gpio)) { 251 if (IS_ERR(panel->enable_gpio)) {
190 err = PTR_ERR(panel->enable_gpio); 252 err = PTR_ERR(panel->enable_gpio);
191 if (err != -ENOENT) { 253 dev_err(dev, "failed to request GPIO: %d\n", err);
192 dev_err(dev, "failed to request GPIO: %d\n", err); 254 return err;
193 return err; 255 }
194 }
195 256
196 panel->enable_gpio = NULL; 257 if (panel->enable_gpio) {
197 } else {
198 err = gpiod_direction_output(panel->enable_gpio, 0); 258 err = gpiod_direction_output(panel->enable_gpio, 0);
199 if (err < 0) { 259 if (err < 0) {
200 dev_err(dev, "failed to setup GPIO: %d\n", err); 260 dev_err(dev, "failed to setup GPIO: %d\n", err);
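[devm_gpiod_get_optional() folds the old -ENOENT special case into the API: an absent line comes back as NULL rather than an error, so only real failures reach the IS_ERR branch, and every later use must tolerate a NULL descriptor (the gpiod_set_value_cansleep() calls earlier in the file are already guarded). Sketch with a hypothetical helper:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_get_enable_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpio = devm_gpiod_get_optional(dev, "enable");
	int err;

	if (IS_ERR(gpio))
		return PTR_ERR(gpio);	/* a real error, not just "absent" */

	if (gpio) {
		err = gpiod_direction_output(gpio, 0);
		if (err < 0)
			return err;
	}

	*out = gpio;	/* may legitimately be NULL */
	return 0;
}
]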
@@ -285,6 +345,7 @@ static const struct drm_display_mode auo_b101aw03_mode = {
285static const struct panel_desc auo_b101aw03 = { 345static const struct panel_desc auo_b101aw03 = {
286 .modes = &auo_b101aw03_mode, 346 .modes = &auo_b101aw03_mode,
287 .num_modes = 1, 347 .num_modes = 1,
348 .bpc = 6,
288 .size = { 349 .size = {
289 .width = 223, 350 .width = 223,
290 .height = 125, 351 .height = 125,
@@ -307,12 +368,40 @@ static const struct drm_display_mode auo_b133xtn01_mode = {
307static const struct panel_desc auo_b133xtn01 = { 368static const struct panel_desc auo_b133xtn01 = {
308 .modes = &auo_b133xtn01_mode, 369 .modes = &auo_b133xtn01_mode,
309 .num_modes = 1, 370 .num_modes = 1,
371 .bpc = 6,
310 .size = { 372 .size = {
311 .width = 293, 373 .width = 293,
312 .height = 165, 374 .height = 165,
313 }, 375 },
314}; 376};
315 377
378static const struct drm_display_mode auo_b133htn01_mode = {
379 .clock = 150660,
380 .hdisplay = 1920,
381 .hsync_start = 1920 + 172,
382 .hsync_end = 1920 + 172 + 80,
383 .htotal = 1920 + 172 + 80 + 60,
384 .vdisplay = 1080,
385 .vsync_start = 1080 + 25,
386 .vsync_end = 1080 + 25 + 10,
387 .vtotal = 1080 + 25 + 10 + 10,
388 .vrefresh = 60,
389};
390
391static const struct panel_desc auo_b133htn01 = {
392 .modes = &auo_b133htn01_mode,
393 .num_modes = 1,
394 .size = {
395 .width = 293,
396 .height = 165,
397 },
398 .delay = {
399 .prepare = 105,
400 .enable = 20,
401 .unprepare = 50,
402 },
403};
404
316static const struct drm_display_mode chunghwa_claa101wa01a_mode = { 405static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
317 .clock = 72070, 406 .clock = 72070,
318 .hdisplay = 1366, 407 .hdisplay = 1366,
@@ -329,6 +418,7 @@ static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
329static const struct panel_desc chunghwa_claa101wa01a = { 418static const struct panel_desc chunghwa_claa101wa01a = {
330 .modes = &chunghwa_claa101wa01a_mode, 419 .modes = &chunghwa_claa101wa01a_mode,
331 .num_modes = 1, 420 .num_modes = 1,
421 .bpc = 6,
332 .size = { 422 .size = {
333 .width = 220, 423 .width = 220,
334 .height = 120, 424 .height = 120,
@@ -351,6 +441,7 @@ static const struct drm_display_mode chunghwa_claa101wb01_mode = {
351static const struct panel_desc chunghwa_claa101wb01 = { 441static const struct panel_desc chunghwa_claa101wb01 = {
352 .modes = &chunghwa_claa101wb01_mode, 442 .modes = &chunghwa_claa101wb01_mode,
353 .num_modes = 1, 443 .num_modes = 1,
444 .bpc = 6,
354 .size = { 445 .size = {
355 .width = 223, 446 .width = 223,
356 .height = 125, 447 .height = 125,
@@ -374,6 +465,7 @@ static const struct drm_display_mode edt_et057090dhu_mode = {
374static const struct panel_desc edt_et057090dhu = { 465static const struct panel_desc edt_et057090dhu = {
375 .modes = &edt_et057090dhu_mode, 466 .modes = &edt_et057090dhu_mode,
376 .num_modes = 1, 467 .num_modes = 1,
468 .bpc = 6,
377 .size = { 469 .size = {
378 .width = 115, 470 .width = 115,
379 .height = 86, 471 .height = 86,
@@ -397,12 +489,82 @@ static const struct drm_display_mode edt_etm0700g0dh6_mode = {
397static const struct panel_desc edt_etm0700g0dh6 = { 489static const struct panel_desc edt_etm0700g0dh6 = {
398 .modes = &edt_etm0700g0dh6_mode, 490 .modes = &edt_etm0700g0dh6_mode,
399 .num_modes = 1, 491 .num_modes = 1,
492 .bpc = 6,
400 .size = { 493 .size = {
401 .width = 152, 494 .width = 152,
402 .height = 91, 495 .height = 91,
403 }, 496 },
404}; 497};
405 498
499static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
500 .clock = 32260,
501 .hdisplay = 800,
502 .hsync_start = 800 + 168,
503 .hsync_end = 800 + 168 + 64,
504 .htotal = 800 + 168 + 64 + 88,
505 .vdisplay = 480,
506 .vsync_start = 480 + 37,
507 .vsync_end = 480 + 37 + 2,
508 .vtotal = 480 + 37 + 2 + 8,
509 .vrefresh = 60,
510};
511
512static const struct panel_desc foxlink_fl500wvr00_a0t = {
513 .modes = &foxlink_fl500wvr00_a0t_mode,
514 .num_modes = 1,
515 .size = {
516 .width = 108,
517 .height = 65,
518 },
519};
520
521static const struct drm_display_mode innolux_n116bge_mode = {
522 .clock = 71000,
523 .hdisplay = 1366,
524 .hsync_start = 1366 + 64,
525 .hsync_end = 1366 + 64 + 6,
526 .htotal = 1366 + 64 + 6 + 64,
527 .vdisplay = 768,
528 .vsync_start = 768 + 8,
529 .vsync_end = 768 + 8 + 4,
530 .vtotal = 768 + 8 + 4 + 8,
531 .vrefresh = 60,
532 .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
533};
534
535static const struct panel_desc innolux_n116bge = {
536 .modes = &innolux_n116bge_mode,
537 .num_modes = 1,
538 .bpc = 6,
539 .size = {
540 .width = 256,
541 .height = 144,
542 },
543};
544
545static const struct drm_display_mode innolux_n156bge_l21_mode = {
546 .clock = 69300,
547 .hdisplay = 1366,
548 .hsync_start = 1366 + 16,
549 .hsync_end = 1366 + 16 + 34,
550 .htotal = 1366 + 16 + 34 + 50,
551 .vdisplay = 768,
552 .vsync_start = 768 + 2,
553 .vsync_end = 768 + 2 + 6,
554 .vtotal = 768 + 2 + 6 + 12,
555 .vrefresh = 60,
556};
557
558static const struct panel_desc innolux_n156bge_l21 = {
559 .modes = &innolux_n156bge_l21_mode,
560 .num_modes = 1,
561 .bpc = 6,
562 .size = {
563 .width = 344,
564 .height = 193,
565 },
566};
567
406static const struct drm_display_mode lg_lp129qe_mode = { 568static const struct drm_display_mode lg_lp129qe_mode = {
407 .clock = 285250, 569 .clock = 285250,
408 .hdisplay = 2560, 570 .hdisplay = 2560,
@@ -419,6 +581,7 @@ static const struct drm_display_mode lg_lp129qe_mode = {
419static const struct panel_desc lg_lp129qe = { 581static const struct panel_desc lg_lp129qe = {
420 .modes = &lg_lp129qe_mode, 582 .modes = &lg_lp129qe_mode,
421 .num_modes = 1, 583 .num_modes = 1,
584 .bpc = 8,
422 .size = { 585 .size = {
423 .width = 272, 586 .width = 272,
424 .height = 181, 587 .height = 181,
@@ -441,6 +604,7 @@ static const struct drm_display_mode samsung_ltn101nt05_mode = {
441static const struct panel_desc samsung_ltn101nt05 = { 604static const struct panel_desc samsung_ltn101nt05 = {
442 .modes = &samsung_ltn101nt05_mode, 605 .modes = &samsung_ltn101nt05_mode,
443 .num_modes = 1, 606 .num_modes = 1,
607 .bpc = 6,
444 .size = { 608 .size = {
445 .width = 1024, 609 .width = 1024,
446 .height = 600, 610 .height = 600,
@@ -452,6 +616,9 @@ static const struct of_device_id platform_of_match[] = {
452 .compatible = "auo,b101aw03", 616 .compatible = "auo,b101aw03",
453 .data = &auo_b101aw03, 617 .data = &auo_b101aw03,
454 }, { 618 }, {
619 .compatible = "auo,b133htn01",
620 .data = &auo_b133htn01,
621 }, {
455 .compatible = "auo,b133xtn01", 622 .compatible = "auo,b133xtn01",
456 .data = &auo_b133xtn01, 623 .data = &auo_b133xtn01,
457 }, { 624 }, {
@@ -470,14 +637,21 @@ static const struct of_device_id platform_of_match[] = {
470 .compatible = "edt,etm0700g0dh6", 637 .compatible = "edt,etm0700g0dh6",
471 .data = &edt_etm0700g0dh6, 638 .data = &edt_etm0700g0dh6,
472 }, { 639 }, {
640 .compatible = "foxlink,fl500wvr00-a0t",
641 .data = &foxlink_fl500wvr00_a0t,
642 }, {
643 .compatible = "innolux,n116bge",
644 .data = &innolux_n116bge,
645 }, {
646 .compatible = "innolux,n156bge-l21",
647 .data = &innolux_n156bge_l21,
648 }, {
473 .compatible = "lg,lp129qe", 649 .compatible = "lg,lp129qe",
474 .data = &lg_lp129qe, 650 .data = &lg_lp129qe,
475 }, { 651 }, {
476 .compatible = "samsung,ltn101nt05", 652 .compatible = "samsung,ltn101nt05",
477 .data = &samsung_ltn101nt05, 653 .data = &samsung_ltn101nt05,
478 }, { 654 }, {
479 .compatible = "simple-panel",
480 }, {
481 /* sentinel */ 655 /* sentinel */
482 } 656 }
483}; 657};
@@ -545,7 +719,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
545 .height = 151, 719 .height = 151,
546 }, 720 },
547 }, 721 },
548 .flags = MIPI_DSI_MODE_VIDEO, 722 .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
549 .format = MIPI_DSI_FMT_RGB888, 723 .format = MIPI_DSI_FMT_RGB888,
550 .lanes = 4, 724 .lanes = 4,
551}; 725};
@@ -599,7 +773,8 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
599 .height = 136, 773 .height = 136,
600 }, 774 },
601 }, 775 },
602 .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE, 776 .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
777 MIPI_DSI_CLOCK_NON_CONTINUOUS,
603 .format = MIPI_DSI_FMT_RGB888, 778 .format = MIPI_DSI_FMT_RGB888,
604 .lanes = 4, 779 .lanes = 4,
605}; 780};
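[The MIPI_DSI_CLOCK_NON_CONTINUOUS flag added to these two DSI panels tells the host driver it may gate the high-speed clock lane between transmissions instead of keeping it running continuously. Illustrative descriptor (my_dsi_desc mirrors the driver-local panel_desc_dsi layout, not a core type):

#include <drm/drm_mipi_dsi.h>

struct my_dsi_desc {
	unsigned long flags;
	enum mipi_dsi_pixel_format format;
	unsigned int lanes;
};

static const struct my_dsi_desc example_dsi_desc = {
	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
};
]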
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5d7ea2461852..b8ced08b6291 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -835,7 +835,7 @@ static void qxl_conn_destroy(struct drm_connector *connector)
835 struct qxl_output *qxl_output = 835 struct qxl_output *qxl_output =
836 drm_connector_to_qxl_output(connector); 836 drm_connector_to_qxl_output(connector);
837 837
838 drm_sysfs_connector_remove(connector); 838 drm_connector_unregister(connector);
839 drm_connector_cleanup(connector); 839 drm_connector_cleanup(connector);
840 kfree(qxl_output); 840 kfree(qxl_output);
841} 841}
@@ -902,7 +902,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
902 902
903 drm_object_attach_property(&connector->base, 903 drm_object_attach_property(&connector->base,
904 qdev->hotplug_mode_update_property, 0); 904 qdev->hotplug_mode_update_property, 0);
905 drm_sysfs_connector_add(connector); 905 drm_connector_register(connector);
906 return 0; 906 return 0;
907} 907}
908 908
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index f437b30ce689..df567888bb1e 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -660,7 +660,7 @@ static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
660 return 0; 660 return 0;
661} 661}
662 662
663static struct drm_fb_helper_funcs qxl_fb_helper_funcs = { 663static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
664 .fb_probe = qxl_fb_find_or_create_single, 664 .fb_probe = qxl_fb_find_or_create_single,
665}; 665};
666 666
@@ -676,9 +676,12 @@ int qxl_fbdev_init(struct qxl_device *qdev)
676 676
677 qfbdev->qdev = qdev; 677 qfbdev->qdev = qdev;
678 qdev->mode_info.qfbdev = qfbdev; 678 qdev->mode_info.qfbdev = qfbdev;
679 qfbdev->helper.funcs = &qxl_fb_helper_funcs;
680 spin_lock_init(&qfbdev->delayed_ops_lock); 679 spin_lock_init(&qfbdev->delayed_ops_lock);
681 INIT_LIST_HEAD(&qfbdev->delayed_ops); 680 INIT_LIST_HEAD(&qfbdev->delayed_ops);
681
682 drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
683 &qxl_fb_helper_funcs);
684
682 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, 685 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
683 qxl_num_crtc /* num_crtc - QXL supports just 1 */, 686 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
684 QXLFB_CONN_LIMIT); 687 QXLFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index d458a140c024..83a423293afd 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -31,7 +31,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
31{ 31{
32 int r; 32 int r;
33 33
34 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 34 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
35 if (unlikely(r != 0)) { 35 if (unlikely(r != 0)) {
36 if (r != -ERESTARTSYS) { 36 if (r != -ERESTARTSYS) {
37 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 37 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
@@ -67,7 +67,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
67{ 67{
68 int r; 68 int r;
69 69
70 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 70 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
71 if (unlikely(r != 0)) { 71 if (unlikely(r != 0)) {
72 if (r != -ERESTARTSYS) { 72 if (r != -ERESTARTSYS) {
73 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 73 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index dbcbfe80aac0..0013ad0db9ef 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ 82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
83 ci_dpm.o dce6_afmt.o radeon_vm.o 83 ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o
84 84
85# add async DMA block 85# add async DMA block
86radeon-y += \ 86radeon-y += \
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7d68203a3737..a7f2ddf09a9d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -331,12 +331,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
331 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) 331 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
332 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; 332 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
333 333
334 /* get the native mode for LVDS */ 334 /* get the native mode for scaling */
335 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) 335 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
336 radeon_panel_mode_fixup(encoder, adjusted_mode); 336 radeon_panel_mode_fixup(encoder, adjusted_mode);
337 337 } else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
338 /* get the native mode for TV */
339 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
340 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; 338 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
341 if (tv_dac) { 339 if (tv_dac) {
342 if (tv_dac->tv_std == TV_STD_NTSC || 340 if (tv_dac->tv_std == TV_STD_NTSC ||
@@ -346,6 +344,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
346 else 344 else
347 radeon_atom_get_tv_timings(rdev, 1, adjusted_mode); 345 radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
348 } 346 }
347 } else if (radeon_encoder->rmx_type != RMX_OFF) {
348 radeon_panel_mode_fixup(encoder, adjusted_mode);
349 } 349 }
350 350
351 if (ASIC_IS_DCE3(rdev) && 351 if (ASIC_IS_DCE3(rdev) &&
@@ -716,7 +716,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
716 if (radeon_connector->use_digital && 716 if (radeon_connector->use_digital &&
717 (radeon_connector->audio == RADEON_AUDIO_ENABLE)) 717 (radeon_connector->audio == RADEON_AUDIO_ENABLE))
718 return ATOM_ENCODER_MODE_HDMI; 718 return ATOM_ENCODER_MODE_HDMI;
719 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 719 else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
720 (radeon_connector->audio == RADEON_AUDIO_AUTO)) 720 (radeon_connector->audio == RADEON_AUDIO_AUTO))
721 return ATOM_ENCODER_MODE_HDMI; 721 return ATOM_ENCODER_MODE_HDMI;
722 else if (radeon_connector->use_digital) 722 else if (radeon_connector->use_digital)
@@ -735,7 +735,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
735 if (radeon_audio != 0) { 735 if (radeon_audio != 0) {
736 if (radeon_connector->audio == RADEON_AUDIO_ENABLE) 736 if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
737 return ATOM_ENCODER_MODE_HDMI; 737 return ATOM_ENCODER_MODE_HDMI;
738 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 738 else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
739 (radeon_connector->audio == RADEON_AUDIO_AUTO)) 739 (radeon_connector->audio == RADEON_AUDIO_AUTO))
740 return ATOM_ENCODER_MODE_HDMI; 740 return ATOM_ENCODER_MODE_HDMI;
741 else 741 else
@@ -755,7 +755,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
755 } else if (radeon_audio != 0) { 755 } else if (radeon_audio != 0) {
756 if (radeon_connector->audio == RADEON_AUDIO_ENABLE) 756 if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
757 return ATOM_ENCODER_MODE_HDMI; 757 return ATOM_ENCODER_MODE_HDMI;
758 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 758 else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
759 (radeon_connector->audio == RADEON_AUDIO_AUTO)) 759 (radeon_connector->audio == RADEON_AUDIO_AUTO))
760 return ATOM_ENCODER_MODE_HDMI; 760 return ATOM_ENCODER_MODE_HDMI;
761 else 761 else
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 584090ac3eb9..022561e28707 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -940,7 +940,18 @@ static void ci_get_leakage_voltages(struct radeon_device *rdev)
940 pi->vddc_leakage.count = 0; 940 pi->vddc_leakage.count = 0;
941 pi->vddci_leakage.count = 0; 941 pi->vddci_leakage.count = 0;
942 942
943 if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) { 943 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
944 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
945 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
946 if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
947 continue;
948 if (vddc != 0 && vddc != virtual_voltage_id) {
949 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
950 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
951 pi->vddc_leakage.count++;
952 }
953 }
954 } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
944 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { 955 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
945 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 956 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
946 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci, 957 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 8debc9d47362..b630edc2fd0c 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -213,24 +213,37 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
213 if (!rdev->smc_fw) 213 if (!rdev->smc_fw)
214 return -EINVAL; 214 return -EINVAL;
215 215
216 switch (rdev->family) { 216 if (rdev->new_fw) {
217 case CHIP_BONAIRE: 217 const struct smc_firmware_header_v1_0 *hdr =
218 ucode_start_address = BONAIRE_SMC_UCODE_START; 218 (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
219 ucode_size = BONAIRE_SMC_UCODE_SIZE; 219
220 break; 220 radeon_ucode_print_smc_hdr(&hdr->header);
221 case CHIP_HAWAII: 221
222 ucode_start_address = HAWAII_SMC_UCODE_START; 222 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
223 ucode_size = HAWAII_SMC_UCODE_SIZE; 223 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
224 break; 224 src = (const u8 *)
225 default: 225 (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
226 DRM_ERROR("unknown asic in smc ucode loader\n"); 226 } else {
227 BUG(); 227 switch (rdev->family) {
228 case CHIP_BONAIRE:
229 ucode_start_address = BONAIRE_SMC_UCODE_START;
230 ucode_size = BONAIRE_SMC_UCODE_SIZE;
231 break;
232 case CHIP_HAWAII:
233 ucode_start_address = HAWAII_SMC_UCODE_START;
234 ucode_size = HAWAII_SMC_UCODE_SIZE;
235 break;
236 default:
237 DRM_ERROR("unknown asic in smc ucode loader\n");
238 BUG();
239 }
240
241 src = (const u8 *)rdev->smc_fw->data;
228 } 242 }
229 243
230 if (ucode_size & 3) 244 if (ucode_size & 3)
231 return -EINVAL; 245 return -EINVAL;
232 246
233 src = (const u8 *)rdev->smc_fw->data;
234 spin_lock_irqsave(&rdev->smc_idx_lock, flags); 247 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
235 WREG32(SMC_IND_INDEX_0, ucode_start_address); 248 WREG32(SMC_IND_INDEX_0, ucode_start_address);
236 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 249 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
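[ci_load_smc_ucode() now prefers the new packaged firmware format: instead of hardcoded per-ASIC start addresses and sizes, it reads them from a little-endian header at the front of the blob, keeping the legacy raw path as fallback. A sketch using only the header fields visible above (smc_firmware_header_v1_0 is radeon-internal):

#include <linux/kernel.h>
#include <linux/firmware.h>

static void my_parse_smc_image(const struct firmware *smc_fw)
{
	const struct smc_firmware_header_v1_0 *hdr =
		(const struct smc_firmware_header_v1_0 *)smc_fw->data;
	u32 start_addr = le32_to_cpu(hdr->ucode_start_addr);
	u32 size = le32_to_cpu(hdr->header.ucode_size_bytes);
	const u8 *src = smc_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes);

	/* start_addr/size/src would then feed the SMC indirect registers;
	 * note the packaged format is little-endian while the legacy blobs
	 * are big-endian words (compare ci_mc_load_microcode below). */
	(void)start_addr; (void)size; (void)src;
}
]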
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index c0ea66192fe0..b625646bf3e2 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -42,6 +42,16 @@ MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin");
42MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); 42MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
43MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); 43MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
44MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); 44MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
45
46MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
47MODULE_FIRMWARE("radeon/bonaire_me.bin");
48MODULE_FIRMWARE("radeon/bonaire_ce.bin");
49MODULE_FIRMWARE("radeon/bonaire_mec.bin");
50MODULE_FIRMWARE("radeon/bonaire_mc.bin");
51MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
52MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
53MODULE_FIRMWARE("radeon/bonaire_smc.bin");
54
45MODULE_FIRMWARE("radeon/HAWAII_pfp.bin"); 55MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
46MODULE_FIRMWARE("radeon/HAWAII_me.bin"); 56MODULE_FIRMWARE("radeon/HAWAII_me.bin");
47MODULE_FIRMWARE("radeon/HAWAII_ce.bin"); 57MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
@@ -51,18 +61,45 @@ MODULE_FIRMWARE("radeon/HAWAII_mc2.bin");
51MODULE_FIRMWARE("radeon/HAWAII_rlc.bin"); 61MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
52MODULE_FIRMWARE("radeon/HAWAII_sdma.bin"); 62MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
53MODULE_FIRMWARE("radeon/HAWAII_smc.bin"); 63MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
64
65MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
66MODULE_FIRMWARE("radeon/hawaii_me.bin");
67MODULE_FIRMWARE("radeon/hawaii_ce.bin");
68MODULE_FIRMWARE("radeon/hawaii_mec.bin");
69MODULE_FIRMWARE("radeon/hawaii_mc.bin");
70MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
71MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
72MODULE_FIRMWARE("radeon/hawaii_smc.bin");
73
54MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); 74MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
55MODULE_FIRMWARE("radeon/KAVERI_me.bin"); 75MODULE_FIRMWARE("radeon/KAVERI_me.bin");
56MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); 76MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
57MODULE_FIRMWARE("radeon/KAVERI_mec.bin"); 77MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
58MODULE_FIRMWARE("radeon/KAVERI_rlc.bin"); 78MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
59MODULE_FIRMWARE("radeon/KAVERI_sdma.bin"); 79MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
80
81MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
82MODULE_FIRMWARE("radeon/kaveri_me.bin");
83MODULE_FIRMWARE("radeon/kaveri_ce.bin");
84MODULE_FIRMWARE("radeon/kaveri_mec.bin");
85MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
86MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
87MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
88
60MODULE_FIRMWARE("radeon/KABINI_pfp.bin"); 89MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
61MODULE_FIRMWARE("radeon/KABINI_me.bin"); 90MODULE_FIRMWARE("radeon/KABINI_me.bin");
62MODULE_FIRMWARE("radeon/KABINI_ce.bin"); 91MODULE_FIRMWARE("radeon/KABINI_ce.bin");
63MODULE_FIRMWARE("radeon/KABINI_mec.bin"); 92MODULE_FIRMWARE("radeon/KABINI_mec.bin");
64MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); 93MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
65MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); 94MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
95
96MODULE_FIRMWARE("radeon/kabini_pfp.bin");
97MODULE_FIRMWARE("radeon/kabini_me.bin");
98MODULE_FIRMWARE("radeon/kabini_ce.bin");
99MODULE_FIRMWARE("radeon/kabini_mec.bin");
100MODULE_FIRMWARE("radeon/kabini_rlc.bin");
101MODULE_FIRMWARE("radeon/kabini_sdma.bin");
102
66MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); 103MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
67MODULE_FIRMWARE("radeon/MULLINS_me.bin"); 104MODULE_FIRMWARE("radeon/MULLINS_me.bin");
68MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); 105MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
@@ -70,6 +107,13 @@ MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
70MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); 107MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
71MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); 108MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
72 109
110MODULE_FIRMWARE("radeon/mullins_pfp.bin");
111MODULE_FIRMWARE("radeon/mullins_me.bin");
112MODULE_FIRMWARE("radeon/mullins_ce.bin");
113MODULE_FIRMWARE("radeon/mullins_mec.bin");
114MODULE_FIRMWARE("radeon/mullins_rlc.bin");
115MODULE_FIRMWARE("radeon/mullins_sdma.bin");
116
73extern int r600_ih_ring_alloc(struct radeon_device *rdev); 117extern int r600_ih_ring_alloc(struct radeon_device *rdev);
74extern void r600_ih_ring_fini(struct radeon_device *rdev); 118extern void r600_ih_ring_fini(struct radeon_device *rdev);
75extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); 119extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
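[The doubled MODULE_FIRMWARE lists reflect the new firmware format shipping under lower-case names alongside the legacy upper-case blobs; cik_init_microcode() below requests the new name first and falls back to the old one for each block. A hypothetical helper distilling that pattern:

#include <linux/kernel.h>
#include <linux/firmware.h>

static int my_request_radeon_fw(const struct firmware **fw,
				struct device *dev,
				const char *new_chip, const char *old_chip,
				const char *block)
{
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_%s.bin",
		 new_chip, block);
	err = request_firmware(fw, fw_name, dev);
	if (!err)
		return 0;	/* new packaged firmware present */

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_%s.bin",
		 old_chip, block);
	return request_firmware(fw, fw_name, dev);
}
]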
@@ -1760,27 +1804,44 @@ static void cik_srbm_select(struct radeon_device *rdev,
1760 */ 1804 */
1761int ci_mc_load_microcode(struct radeon_device *rdev) 1805int ci_mc_load_microcode(struct radeon_device *rdev)
1762{ 1806{
1763 const __be32 *fw_data; 1807 const __be32 *fw_data = NULL;
1808 const __le32 *new_fw_data = NULL;
1764 u32 running, blackout = 0; 1809 u32 running, blackout = 0;
1765 u32 *io_mc_regs; 1810 u32 *io_mc_regs = NULL;
1811 const __le32 *new_io_mc_regs = NULL;
1766 int i, regs_size, ucode_size; 1812 int i, regs_size, ucode_size;
1767 1813
1768 if (!rdev->mc_fw) 1814 if (!rdev->mc_fw)
1769 return -EINVAL; 1815 return -EINVAL;
1770 1816
1771 ucode_size = rdev->mc_fw->size / 4; 1817 if (rdev->new_fw) {
1818 const struct mc_firmware_header_v1_0 *hdr =
1819 (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
1772 1820
1773 switch (rdev->family) { 1821 radeon_ucode_print_mc_hdr(&hdr->header);
1774 case CHIP_BONAIRE: 1822
1775 io_mc_regs = (u32 *)&bonaire_io_mc_regs; 1823 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
1776 regs_size = BONAIRE_IO_MC_REGS_SIZE; 1824 new_io_mc_regs = (const __le32 *)
1777 break; 1825 (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
1778 case CHIP_HAWAII: 1826 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1779 io_mc_regs = (u32 *)&hawaii_io_mc_regs; 1827 new_fw_data = (const __le32 *)
1780 regs_size = HAWAII_IO_MC_REGS_SIZE; 1828 (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1781 break; 1829 } else {
1782 default: 1830 ucode_size = rdev->mc_fw->size / 4;
1783 return -EINVAL; 1831
1832 switch (rdev->family) {
1833 case CHIP_BONAIRE:
1834 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
1835 regs_size = BONAIRE_IO_MC_REGS_SIZE;
1836 break;
1837 case CHIP_HAWAII:
1838 io_mc_regs = (u32 *)&hawaii_io_mc_regs;
1839 regs_size = HAWAII_IO_MC_REGS_SIZE;
1840 break;
1841 default:
1842 return -EINVAL;
1843 }
1844 fw_data = (const __be32 *)rdev->mc_fw->data;
1784 } 1845 }
1785 1846
1786 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 1847 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1797,13 +1858,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
1797 1858
1798 /* load mc io regs */ 1859 /* load mc io regs */
1799 for (i = 0; i < regs_size; i++) { 1860 for (i = 0; i < regs_size; i++) {
1800 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); 1861 if (rdev->new_fw) {
1801 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); 1862 WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
1863 WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
1864 } else {
1865 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
1866 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
1867 }
1802 } 1868 }
1803 /* load the MC ucode */ 1869 /* load the MC ucode */
1804 fw_data = (const __be32 *)rdev->mc_fw->data; 1870 for (i = 0; i < ucode_size; i++) {
1805 for (i = 0; i < ucode_size; i++) 1871 if (rdev->new_fw)
1806 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); 1872 WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
1873 else
1874 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
1875 }
1807 1876
1808 /* put the engine back into the active state */ 1877 /* put the engine back into the active state */
1809 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 1878 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
@@ -1841,17 +1910,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
1841static int cik_init_microcode(struct radeon_device *rdev) 1910static int cik_init_microcode(struct radeon_device *rdev)
1842{ 1911{
1843 const char *chip_name; 1912 const char *chip_name;
1913 const char *new_chip_name;
1844 size_t pfp_req_size, me_req_size, ce_req_size, 1914 size_t pfp_req_size, me_req_size, ce_req_size,
1845 mec_req_size, rlc_req_size, mc_req_size = 0, 1915 mec_req_size, rlc_req_size, mc_req_size = 0,
1846 sdma_req_size, smc_req_size = 0, mc2_req_size = 0; 1916 sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
1847 char fw_name[30]; 1917 char fw_name[30];
1918 int new_fw = 0;
1848 int err; 1919 int err;
1920 int num_fw;
1849 1921
1850 DRM_DEBUG("\n"); 1922 DRM_DEBUG("\n");
1851 1923
1852 switch (rdev->family) { 1924 switch (rdev->family) {
1853 case CHIP_BONAIRE: 1925 case CHIP_BONAIRE:
1854 chip_name = "BONAIRE"; 1926 chip_name = "BONAIRE";
1927 new_chip_name = "bonaire";
1855 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1928 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1856 me_req_size = CIK_ME_UCODE_SIZE * 4; 1929 me_req_size = CIK_ME_UCODE_SIZE * 4;
1857 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1930 ce_req_size = CIK_CE_UCODE_SIZE * 4;
@@ -1861,9 +1934,11 @@ static int cik_init_microcode(struct radeon_device *rdev)
1861 mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4; 1934 mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
1862 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1935 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1863 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); 1936 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
1937 num_fw = 8;
1864 break; 1938 break;
1865 case CHIP_HAWAII: 1939 case CHIP_HAWAII:
1866 chip_name = "HAWAII"; 1940 chip_name = "HAWAII";
1941 new_chip_name = "hawaii";
1867 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1942 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1868 me_req_size = CIK_ME_UCODE_SIZE * 4; 1943 me_req_size = CIK_ME_UCODE_SIZE * 4;
1869 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1944 ce_req_size = CIK_CE_UCODE_SIZE * 4;
@@ -1873,142 +1948,285 @@ static int cik_init_microcode(struct radeon_device *rdev)
1873 mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4; 1948 mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
1874 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1949 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1875 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4); 1950 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
1951 num_fw = 8;
1876 break; 1952 break;
1877 case CHIP_KAVERI: 1953 case CHIP_KAVERI:
1878 chip_name = "KAVERI"; 1954 chip_name = "KAVERI";
1955 new_chip_name = "kaveri";
1879 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1956 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1880 me_req_size = CIK_ME_UCODE_SIZE * 4; 1957 me_req_size = CIK_ME_UCODE_SIZE * 4;
1881 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1958 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1882 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1959 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1883 rlc_req_size = KV_RLC_UCODE_SIZE * 4; 1960 rlc_req_size = KV_RLC_UCODE_SIZE * 4;
1884 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1961 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1962 num_fw = 7;
1885 break; 1963 break;
1886 case CHIP_KABINI: 1964 case CHIP_KABINI:
1887 chip_name = "KABINI"; 1965 chip_name = "KABINI";
1966 new_chip_name = "kabini";
1888 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1967 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1889 me_req_size = CIK_ME_UCODE_SIZE * 4; 1968 me_req_size = CIK_ME_UCODE_SIZE * 4;
1890 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1969 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1891 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1970 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1892 rlc_req_size = KB_RLC_UCODE_SIZE * 4; 1971 rlc_req_size = KB_RLC_UCODE_SIZE * 4;
1893 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1972 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1973 num_fw = 6;
1894 break; 1974 break;
1895 case CHIP_MULLINS: 1975 case CHIP_MULLINS:
1896 chip_name = "MULLINS"; 1976 chip_name = "MULLINS";
1977 new_chip_name = "mullins";
1897 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 1978 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1898 me_req_size = CIK_ME_UCODE_SIZE * 4; 1979 me_req_size = CIK_ME_UCODE_SIZE * 4;
1899 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1980 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1900 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1981 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1901 rlc_req_size = ML_RLC_UCODE_SIZE * 4; 1982 rlc_req_size = ML_RLC_UCODE_SIZE * 4;
1902 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1983 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1984 num_fw = 6;
1903 break; 1985 break;
1904 default: BUG(); 1986 default: BUG();
1905 } 1987 }
1906 1988
1907 DRM_INFO("Loading %s Microcode\n", chip_name); 1989 DRM_INFO("Loading %s Microcode\n", new_chip_name);
1908 1990
1909 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 1991 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
1910 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); 1992 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1911 if (err) 1993 if (err) {
1912 goto out; 1994 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1913 if (rdev->pfp_fw->size != pfp_req_size) { 1995 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1914 printk(KERN_ERR 1996 if (err)
1915 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 1997 goto out;
1916 rdev->pfp_fw->size, fw_name); 1998 if (rdev->pfp_fw->size != pfp_req_size) {
1917 err = -EINVAL; 1999 printk(KERN_ERR
1918 goto out; 2000 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
2001 rdev->pfp_fw->size, fw_name);
2002 err = -EINVAL;
2003 goto out;
2004 }
2005 } else {
2006 err = radeon_ucode_validate(rdev->pfp_fw);
2007 if (err) {
2008 printk(KERN_ERR
2009 "cik_fw: validation failed for firmware \"%s\"\n",
2010 fw_name);
2011 goto out;
2012 } else {
2013 new_fw++;
2014 }
1919 } 2015 }
1920 2016
1921 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 2017 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
1922 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); 2018 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1923 if (err) 2019 if (err) {
1924 goto out; 2020 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1925 if (rdev->me_fw->size != me_req_size) { 2021 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1926 printk(KERN_ERR 2022 if (err)
1927 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 2023 goto out;
1928 rdev->me_fw->size, fw_name); 2024 if (rdev->me_fw->size != me_req_size) {
1929 err = -EINVAL; 2025 printk(KERN_ERR
2026 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
2027 rdev->me_fw->size, fw_name);
2028 err = -EINVAL;
2029 }
2030 } else {
2031 err = radeon_ucode_validate(rdev->me_fw);
2032 if (err) {
2033 printk(KERN_ERR
2034 "cik_fw: validation failed for firmware \"%s\"\n",
2035 fw_name);
2036 goto out;
2037 } else {
2038 new_fw++;
2039 }
1930 } 2040 }
1931 2041
1932 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); 2042 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
1933 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); 2043 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1934 if (err) 2044 if (err) {
1935 goto out; 2045 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
1936 if (rdev->ce_fw->size != ce_req_size) { 2046 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1937 printk(KERN_ERR 2047 if (err)
1938 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 2048 goto out;
1939 rdev->ce_fw->size, fw_name); 2049 if (rdev->ce_fw->size != ce_req_size) {
1940 err = -EINVAL; 2050 printk(KERN_ERR
2051 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
2052 rdev->ce_fw->size, fw_name);
2053 err = -EINVAL;
2054 }
2055 } else {
2056 err = radeon_ucode_validate(rdev->ce_fw);
2057 if (err) {
2058 printk(KERN_ERR
2059 "cik_fw: validation failed for firmware \"%s\"\n",
2060 fw_name);
2061 goto out;
2062 } else {
2063 new_fw++;
2064 }
1941 } 2065 }
1942 2066
1943 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); 2067 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name);
1944 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev); 2068 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
1945 if (err) 2069 if (err) {
1946 goto out; 2070 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
1947 if (rdev->mec_fw->size != mec_req_size) { 2071 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
1948 printk(KERN_ERR 2072 if (err)
1949 "cik_cp: Bogus length %zu in firmware \"%s\"\n", 2073 goto out;
1950 rdev->mec_fw->size, fw_name); 2074 if (rdev->mec_fw->size != mec_req_size) {
1951 err = -EINVAL; 2075 printk(KERN_ERR
2076 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
2077 rdev->mec_fw->size, fw_name);
2078 err = -EINVAL;
2079 }
2080 } else {
2081 err = radeon_ucode_validate(rdev->mec_fw);
2082 if (err) {
2083 printk(KERN_ERR
2084 "cik_fw: validation failed for firmware \"%s\"\n",
2085 fw_name);
2086 goto out;
2087 } else {
2088 new_fw++;
2089 }
2090 }
2091
2092 if (rdev->family == CHIP_KAVERI) {
2093 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name);
2094 err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev);
2095 if (err) {
2096 goto out;
2097 } else {
2098 err = radeon_ucode_validate(rdev->mec2_fw);
2099 if (err) {
2100 goto out;
2101 } else {
2102 new_fw++;
2103 }
2104 }
1952 } 2105 }
1953 2106
1954 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); 2107 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
1955 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); 2108 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1956 if (err) 2109 if (err) {
1957 goto out; 2110 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
1958 if (rdev->rlc_fw->size != rlc_req_size) { 2111 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1959 printk(KERN_ERR 2112 if (err)
1960 "cik_rlc: Bogus length %zu in firmware \"%s\"\n", 2113 goto out;
1961 rdev->rlc_fw->size, fw_name); 2114 if (rdev->rlc_fw->size != rlc_req_size) {
1962 err = -EINVAL; 2115 printk(KERN_ERR
2116 "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
2117 rdev->rlc_fw->size, fw_name);
2118 err = -EINVAL;
2119 }
2120 } else {
2121 err = radeon_ucode_validate(rdev->rlc_fw);
2122 if (err) {
2123 printk(KERN_ERR
2124 "cik_fw: validation failed for firmware \"%s\"\n",
2125 fw_name);
2126 goto out;
2127 } else {
2128 new_fw++;
2129 }
1963 } 2130 }
1964 2131
1965 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); 2132 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name);
1966 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev); 2133 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
1967 if (err) 2134 if (err) {
1968 goto out; 2135 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
1969 if (rdev->sdma_fw->size != sdma_req_size) { 2136 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
1970 printk(KERN_ERR 2137 if (err)
1971 "cik_sdma: Bogus length %zu in firmware \"%s\"\n", 2138 goto out;
1972 rdev->sdma_fw->size, fw_name); 2139 if (rdev->sdma_fw->size != sdma_req_size) {
1973 err = -EINVAL; 2140 printk(KERN_ERR
2141 "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
2142 rdev->sdma_fw->size, fw_name);
2143 err = -EINVAL;
2144 }
2145 } else {
2146 err = radeon_ucode_validate(rdev->sdma_fw);
2147 if (err) {
2148 printk(KERN_ERR
2149 "cik_fw: validation failed for firmware \"%s\"\n",
2150 fw_name);
2151 goto out;
2152 } else {
2153 new_fw++;
2154 }
1974 } 2155 }
1975 2156
1976 /* No SMC, MC ucode on APUs */ 2157 /* No SMC, MC ucode on APUs */
1977 if (!(rdev->flags & RADEON_IS_IGP)) { 2158 if (!(rdev->flags & RADEON_IS_IGP)) {
1978 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); 2159 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
1979 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 2160 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1980 if (err) { 2161 if (err) {
1981 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 2162 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
1982 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 2163 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1983 if (err) 2164 if (err) {
2165 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
2166 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
2167 if (err)
2168 goto out;
2169 }
2170 if ((rdev->mc_fw->size != mc_req_size) &&
2171 (rdev->mc_fw->size != mc2_req_size)){
2172 printk(KERN_ERR
2173 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
2174 rdev->mc_fw->size, fw_name);
2175 err = -EINVAL;
2176 }
2177 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
2178 } else {
2179 err = radeon_ucode_validate(rdev->mc_fw);
2180 if (err) {
2181 printk(KERN_ERR
2182 "cik_fw: validation failed for firmware \"%s\"\n",
2183 fw_name);
1984 goto out; 2184 goto out;
2185 } else {
2186 new_fw++;
2187 }
1985 } 2188 }
1986 if ((rdev->mc_fw->size != mc_req_size) &&
1987 (rdev->mc_fw->size != mc2_req_size)){
1988 printk(KERN_ERR
1989 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
1990 rdev->mc_fw->size, fw_name);
1991 err = -EINVAL;
1992 }
1993 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1994 2189
1995 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 2190 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
1996 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 2191 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1997 if (err) { 2192 if (err) {
1998 printk(KERN_ERR 2193 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1999 "smc: error loading firmware \"%s\"\n", 2194 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2000 fw_name); 2195 if (err) {
2001 release_firmware(rdev->smc_fw); 2196 printk(KERN_ERR
2002 rdev->smc_fw = NULL; 2197 "smc: error loading firmware \"%s\"\n",
2003 err = 0; 2198 fw_name);
2004 } else if (rdev->smc_fw->size != smc_req_size) { 2199 release_firmware(rdev->smc_fw);
2005 printk(KERN_ERR 2200 rdev->smc_fw = NULL;
2006 "cik_smc: Bogus length %zu in firmware \"%s\"\n", 2201 err = 0;
2007 rdev->smc_fw->size, fw_name); 2202 } else if (rdev->smc_fw->size != smc_req_size) {
2008 err = -EINVAL; 2203 printk(KERN_ERR
2204 "cik_smc: Bogus length %zu in firmware \"%s\"\n",
2205 rdev->smc_fw->size, fw_name);
2206 err = -EINVAL;
2207 }
2208 } else {
2209 err = radeon_ucode_validate(rdev->smc_fw);
2210 if (err) {
2211 printk(KERN_ERR
2212 "cik_fw: validation failed for firmware \"%s\"\n",
2213 fw_name);
2214 goto out;
2215 } else {
2216 new_fw++;
2217 }
2009 } 2218 }
2010 } 2219 }
2011 2220
2221 if (new_fw == 0) {
2222 rdev->new_fw = false;
2223 } else if (new_fw < num_fw) {
 2224 printk(KERN_ERR "cik_fw: mixing new and old firmware!\n");
2225 err = -EINVAL;
2226 } else {
2227 rdev->new_fw = true;
2228 }
2229
2012out: 2230out:
2013 if (err) { 2231 if (err) {
2014 if (err != -EINVAL) 2232 if (err != -EINVAL)
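The new_fw/num_fw bookkeeping in this hunk enforces an all-or-nothing policy: rdev->new_fw is set only when every image arrived in the new format, and a mix is rejected so the load paths below never have to guess per blob. Condensed, the decision is (a sketch restating the hunk, not new logic):

	if (new_fw == 0)
		rdev->new_fw = false;	/* all legacy images */
	else if (new_fw < num_fw)
		err = -EINVAL;		/* mixed legacy/new images: refuse */
	else
		rdev->new_fw = true;	/* all new-format images */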
@@ -2021,8 +2239,14 @@ out:
2021 rdev->me_fw = NULL; 2239 rdev->me_fw = NULL;
2022 release_firmware(rdev->ce_fw); 2240 release_firmware(rdev->ce_fw);
2023 rdev->ce_fw = NULL; 2241 rdev->ce_fw = NULL;
2242 release_firmware(rdev->mec_fw);
2243 rdev->mec_fw = NULL;
2244 release_firmware(rdev->mec2_fw);
2245 rdev->mec2_fw = NULL;
2024 release_firmware(rdev->rlc_fw); 2246 release_firmware(rdev->rlc_fw);
2025 rdev->rlc_fw = NULL; 2247 rdev->rlc_fw = NULL;
2248 release_firmware(rdev->sdma_fw);
2249 rdev->sdma_fw = NULL;
2026 release_firmware(rdev->mc_fw); 2250 release_firmware(rdev->mc_fw);
2027 rdev->mc_fw = NULL; 2251 rdev->mc_fw = NULL;
2028 release_firmware(rdev->smc_fw); 2252 release_firmware(rdev->smc_fw);
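Every blob in cik_init_microcode repeats the same request pattern: try the new lower-case name, fall back to the legacy upper-case name, validate new-format images, and count them. A minimal sketch of that pattern as a helper (hypothetical function name and return convention; only the request_firmware()/release_firmware() API and the radeon_ucode_validate() call seen above are assumed):

	/* Hypothetical helper: returns 1 for a validated new-format image,
	 * 0 for a legacy image (caller still size-checks it), <0 on error. */
	static int cik_request_ucode(struct radeon_device *rdev,
				     const struct firmware **fw,
				     const char *new_name,
				     const char *legacy_name,
				     const char *suffix)
	{
		char fw_name[30];
		int err;

		snprintf(fw_name, sizeof(fw_name), "radeon/%s_%s.bin", new_name, suffix);
		err = request_firmware(fw, fw_name, rdev->dev);
		if (!err) {
			err = radeon_ucode_validate(*fw);
			if (err) {
				release_firmware(*fw);
				*fw = NULL;
				return err;
			}
			return 1;	/* new-format image */
		}

		snprintf(fw_name, sizeof(fw_name), "radeon/%s_%s.bin", legacy_name, suffix);
		err = request_firmware(fw, fw_name, rdev->dev);
		return err ? err : 0;	/* legacy image */
	}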
@@ -3666,8 +3890,6 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
3666 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2)); 3890 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
3667 radeon_ring_write(ring, fence->seq); 3891 radeon_ring_write(ring, fence->seq);
3668 radeon_ring_write(ring, 0); 3892 radeon_ring_write(ring, 0);
3669 /* HDP flush */
3670 cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
3671} 3893}
3672 3894
3673/** 3895/**
@@ -3696,8 +3918,6 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
3696 radeon_ring_write(ring, upper_32_bits(addr)); 3918 radeon_ring_write(ring, upper_32_bits(addr));
3697 radeon_ring_write(ring, fence->seq); 3919 radeon_ring_write(ring, fence->seq);
3698 radeon_ring_write(ring, 0); 3920 radeon_ring_write(ring, 0);
3699 /* HDP flush */
3700 cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
3701} 3921}
3702 3922
3703bool cik_semaphore_ring_emit(struct radeon_device *rdev, 3923bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3969,7 +4189,6 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
3969 */ 4189 */
3970static int cik_cp_gfx_load_microcode(struct radeon_device *rdev) 4190static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
3971{ 4191{
3972 const __be32 *fw_data;
3973 int i; 4192 int i;
3974 4193
3975 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw) 4194 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
@@ -3977,26 +4196,70 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
3977 4196
3978 cik_cp_gfx_enable(rdev, false); 4197 cik_cp_gfx_enable(rdev, false);
3979 4198
3980 /* PFP */ 4199 if (rdev->new_fw) {
3981 fw_data = (const __be32 *)rdev->pfp_fw->data; 4200 const struct gfx_firmware_header_v1_0 *pfp_hdr =
3982 WREG32(CP_PFP_UCODE_ADDR, 0); 4201 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
3983 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++) 4202 const struct gfx_firmware_header_v1_0 *ce_hdr =
3984 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); 4203 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
3985 WREG32(CP_PFP_UCODE_ADDR, 0); 4204 const struct gfx_firmware_header_v1_0 *me_hdr =
3986 4205 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
3987 /* CE */ 4206 const __le32 *fw_data;
3988 fw_data = (const __be32 *)rdev->ce_fw->data; 4207 u32 fw_size;
3989 WREG32(CP_CE_UCODE_ADDR, 0); 4208
3990 for (i = 0; i < CIK_CE_UCODE_SIZE; i++) 4209 radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
3991 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); 4210 radeon_ucode_print_gfx_hdr(&ce_hdr->header);
3992 WREG32(CP_CE_UCODE_ADDR, 0); 4211 radeon_ucode_print_gfx_hdr(&me_hdr->header);
3993 4212
3994 /* ME */ 4213 /* PFP */
3995 fw_data = (const __be32 *)rdev->me_fw->data; 4214 fw_data = (const __le32 *)
3996 WREG32(CP_ME_RAM_WADDR, 0); 4215 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3997 for (i = 0; i < CIK_ME_UCODE_SIZE; i++) 4216 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3998 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); 4217 WREG32(CP_PFP_UCODE_ADDR, 0);
3999 WREG32(CP_ME_RAM_WADDR, 0); 4218 for (i = 0; i < fw_size; i++)
4219 WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
4220 WREG32(CP_PFP_UCODE_ADDR, 0);
4221
4222 /* CE */
4223 fw_data = (const __le32 *)
4224 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
4225 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
4226 WREG32(CP_CE_UCODE_ADDR, 0);
4227 for (i = 0; i < fw_size; i++)
4228 WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
4229 WREG32(CP_CE_UCODE_ADDR, 0);
4230
4231 /* ME */
 4232 fw_data = (const __le32 *)
4233 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
4234 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
4235 WREG32(CP_ME_RAM_WADDR, 0);
4236 for (i = 0; i < fw_size; i++)
4237 WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
4238 WREG32(CP_ME_RAM_WADDR, 0);
4239 } else {
4240 const __be32 *fw_data;
4241
4242 /* PFP */
4243 fw_data = (const __be32 *)rdev->pfp_fw->data;
4244 WREG32(CP_PFP_UCODE_ADDR, 0);
4245 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
4246 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
4247 WREG32(CP_PFP_UCODE_ADDR, 0);
4248
4249 /* CE */
4250 fw_data = (const __be32 *)rdev->ce_fw->data;
4251 WREG32(CP_CE_UCODE_ADDR, 0);
4252 for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
4253 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
4254 WREG32(CP_CE_UCODE_ADDR, 0);
4255
4256 /* ME */
4257 fw_data = (const __be32 *)rdev->me_fw->data;
4258 WREG32(CP_ME_RAM_WADDR, 0);
4259 for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
4260 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
4261 WREG32(CP_ME_RAM_WADDR, 0);
4262 }
4000 4263
4001 WREG32(CP_PFP_UCODE_ADDR, 0); 4264 WREG32(CP_PFP_UCODE_ADDR, 0);
4002 WREG32(CP_CE_UCODE_ADDR, 0); 4265 WREG32(CP_CE_UCODE_ADDR, 0);
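Both branches of cik_cp_gfx_load_microcode stream 32-bit words into a CP ucode window; they differ only in how the payload is located (fixed size and big-endian for legacy blobs, header-described offset/size and little-endian for new ones). The new-format upload reduces to a sketch like this (hypothetical helper; header fields as in the gfx_firmware_header_v1_0 uses above):

	/* Hypothetical helper: upload a little-endian payload described by a
	 * common firmware header into one CP ucode ADDR/DATA register pair. */
	static void cik_load_ucode(struct radeon_device *rdev,
				   u32 addr_reg, u32 data_reg,
				   const u8 *blob, u32 offset_bytes, u32 size_bytes)
	{
		const __le32 *fw_data = (const __le32 *)(blob + offset_bytes);
		u32 i, fw_size = size_bytes / 4;

		WREG32(addr_reg, 0);		/* reset the ucode write pointer */
		for (i = 0; i < fw_size; i++)
			WREG32(data_reg, le32_to_cpup(fw_data++));
		WREG32(addr_reg, 0);
	}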
@@ -4261,7 +4524,6 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
4261 */ 4524 */
4262static int cik_cp_compute_load_microcode(struct radeon_device *rdev) 4525static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
4263{ 4526{
4264 const __be32 *fw_data;
4265 int i; 4527 int i;
4266 4528
4267 if (!rdev->mec_fw) 4529 if (!rdev->mec_fw)
@@ -4269,20 +4531,55 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
4269 4531
4270 cik_cp_compute_enable(rdev, false); 4532 cik_cp_compute_enable(rdev, false);
4271 4533
4272 /* MEC1 */ 4534 if (rdev->new_fw) {
4273 fw_data = (const __be32 *)rdev->mec_fw->data; 4535 const struct gfx_firmware_header_v1_0 *mec_hdr =
4274 WREG32(CP_MEC_ME1_UCODE_ADDR, 0); 4536 (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
4275 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++) 4537 const __le32 *fw_data;
4276 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++)); 4538 u32 fw_size;
4277 WREG32(CP_MEC_ME1_UCODE_ADDR, 0); 4539
4540 radeon_ucode_print_gfx_hdr(&mec_hdr->header);
4541
4542 /* MEC1 */
4543 fw_data = (const __le32 *)
4544 (rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
4545 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
4546 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4547 for (i = 0; i < fw_size; i++)
4548 WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
4549 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4278 4550
4279 if (rdev->family == CHIP_KAVERI) {
4280 /* MEC2 */ 4551 /* MEC2 */
4552 if (rdev->family == CHIP_KAVERI) {
4553 const struct gfx_firmware_header_v1_0 *mec2_hdr =
4554 (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
4555
4556 fw_data = (const __le32 *)
4557 (rdev->mec2_fw->data +
4558 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
4559 fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
4560 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4561 for (i = 0; i < fw_size; i++)
4562 WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
4563 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4564 }
4565 } else {
4566 const __be32 *fw_data;
4567
4568 /* MEC1 */
4281 fw_data = (const __be32 *)rdev->mec_fw->data; 4569 fw_data = (const __be32 *)rdev->mec_fw->data;
4282 WREG32(CP_MEC_ME2_UCODE_ADDR, 0); 4570 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4283 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++) 4571 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
4284 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++)); 4572 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
4285 WREG32(CP_MEC_ME2_UCODE_ADDR, 0); 4573 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4574
4575 if (rdev->family == CHIP_KAVERI) {
4576 /* MEC2 */
4577 fw_data = (const __be32 *)rdev->mec_fw->data;
4578 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4579 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
4580 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
4581 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4582 }
4286 } 4583 }
4287 4584
4288 return 0; 4585 return 0;
@@ -4375,7 +4672,7 @@ static int cik_mec_init(struct radeon_device *rdev)
4375 r = radeon_bo_create(rdev, 4672 r = radeon_bo_create(rdev,
4376 rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2, 4673 rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
4377 PAGE_SIZE, true, 4674 PAGE_SIZE, true,
4378 RADEON_GEM_DOMAIN_GTT, NULL, 4675 RADEON_GEM_DOMAIN_GTT, 0, NULL,
4379 &rdev->mec.hpd_eop_obj); 4676 &rdev->mec.hpd_eop_obj);
4380 if (r) { 4677 if (r) {
4381 dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r); 4678 dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -4545,7 +4842,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
4545 r = radeon_bo_create(rdev, 4842 r = radeon_bo_create(rdev,
4546 sizeof(struct bonaire_mqd), 4843 sizeof(struct bonaire_mqd),
4547 PAGE_SIZE, true, 4844 PAGE_SIZE, true,
4548 RADEON_GEM_DOMAIN_GTT, NULL, 4845 RADEON_GEM_DOMAIN_GTT, 0, NULL,
4549 &rdev->ring[idx].mqd_obj); 4846 &rdev->ring[idx].mqd_obj);
4550 if (r) { 4847 if (r) {
4551 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r); 4848 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
@@ -5402,7 +5699,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
5402 r = radeon_gart_table_vram_pin(rdev); 5699 r = radeon_gart_table_vram_pin(rdev);
5403 if (r) 5700 if (r)
5404 return r; 5701 return r;
5405 radeon_gart_restore(rdev);
5406 /* Setup TLB control */ 5702 /* Setup TLB control */
5407 WREG32(MC_VM_MX_L1_TLB_CNTL, 5703 WREG32(MC_VM_MX_L1_TLB_CNTL,
5408 (0xA << 7) | 5704 (0xA << 7) |
@@ -5642,12 +5938,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
5642void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 5938void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5643{ 5939{
5644 struct radeon_ring *ring = &rdev->ring[ridx]; 5940 struct radeon_ring *ring = &rdev->ring[ridx];
5941 int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
5645 5942
5646 if (vm == NULL) 5943 if (vm == NULL)
5647 return; 5944 return;
5648 5945
5649 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5946 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5650 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5947 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5651 WRITE_DATA_DST_SEL(0))); 5948 WRITE_DATA_DST_SEL(0)));
5652 if (vm->id < 8) { 5949 if (vm->id < 8) {
5653 radeon_ring_write(ring, 5950 radeon_ring_write(ring,
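The usepfp flag threads one fact through cik_vm_flush: only the gfx ring has a Prefetch Parser, so flushes emitted on compute rings must select the ME engine instead. Restated in isolation:

	int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);	/* compute has no PFP */

	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, WRITE_DATA_ENGINE_SEL(usepfp) |
				WRITE_DATA_DST_SEL(0));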
@@ -5697,7 +5994,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5697 radeon_ring_write(ring, 1 << vm->id); 5994 radeon_ring_write(ring, 1 << vm->id);
5698 5995
5699 /* compute doesn't have PFP */ 5996 /* compute doesn't have PFP */
5700 if (ridx == RADEON_RING_TYPE_GFX_INDEX) { 5997 if (usepfp) {
5701 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 5998 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5702 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 5999 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5703 radeon_ring_write(ring, 0x0); 6000 radeon_ring_write(ring, 0x0);
@@ -5865,28 +6162,10 @@ static void cik_rlc_start(struct radeon_device *rdev)
5865static int cik_rlc_resume(struct radeon_device *rdev) 6162static int cik_rlc_resume(struct radeon_device *rdev)
5866{ 6163{
5867 u32 i, size, tmp; 6164 u32 i, size, tmp;
5868 const __be32 *fw_data;
5869 6165
5870 if (!rdev->rlc_fw) 6166 if (!rdev->rlc_fw)
5871 return -EINVAL; 6167 return -EINVAL;
5872 6168
5873 switch (rdev->family) {
5874 case CHIP_BONAIRE:
5875 case CHIP_HAWAII:
5876 default:
5877 size = BONAIRE_RLC_UCODE_SIZE;
5878 break;
5879 case CHIP_KAVERI:
5880 size = KV_RLC_UCODE_SIZE;
5881 break;
5882 case CHIP_KABINI:
5883 size = KB_RLC_UCODE_SIZE;
5884 break;
5885 case CHIP_MULLINS:
5886 size = ML_RLC_UCODE_SIZE;
5887 break;
5888 }
5889
5890 cik_rlc_stop(rdev); 6169 cik_rlc_stop(rdev);
5891 6170
5892 /* disable CG */ 6171 /* disable CG */
@@ -5910,11 +6189,45 @@ static int cik_rlc_resume(struct radeon_device *rdev)
5910 WREG32(RLC_MC_CNTL, 0); 6189 WREG32(RLC_MC_CNTL, 0);
5911 WREG32(RLC_UCODE_CNTL, 0); 6190 WREG32(RLC_UCODE_CNTL, 0);
5912 6191
5913 fw_data = (const __be32 *)rdev->rlc_fw->data; 6192 if (rdev->new_fw) {
6193 const struct rlc_firmware_header_v1_0 *hdr =
6194 (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
6195 const __le32 *fw_data = (const __le32 *)
6196 (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6197
6198 radeon_ucode_print_rlc_hdr(&hdr->header);
6199
6200 size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
5914 WREG32(RLC_GPM_UCODE_ADDR, 0); 6201 WREG32(RLC_GPM_UCODE_ADDR, 0);
5915 for (i = 0; i < size; i++) 6202 for (i = 0; i < size; i++)
5916 WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++)); 6203 WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
5917 WREG32(RLC_GPM_UCODE_ADDR, 0); 6204 WREG32(RLC_GPM_UCODE_ADDR, 0);
6205 } else {
6206 const __be32 *fw_data;
6207
6208 switch (rdev->family) {
6209 case CHIP_BONAIRE:
6210 case CHIP_HAWAII:
6211 default:
6212 size = BONAIRE_RLC_UCODE_SIZE;
6213 break;
6214 case CHIP_KAVERI:
6215 size = KV_RLC_UCODE_SIZE;
6216 break;
6217 case CHIP_KABINI:
6218 size = KB_RLC_UCODE_SIZE;
6219 break;
6220 case CHIP_MULLINS:
6221 size = ML_RLC_UCODE_SIZE;
6222 break;
6223 }
6224
6225 fw_data = (const __be32 *)rdev->rlc_fw->data;
6226 WREG32(RLC_GPM_UCODE_ADDR, 0);
6227 for (i = 0; i < size; i++)
6228 WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
6229 WREG32(RLC_GPM_UCODE_ADDR, 0);
6230 }
5918 6231
5919 /* XXX - find out what chips support lbpw */ 6232 /* XXX - find out what chips support lbpw */
5920 cik_enable_lbpw(rdev, false); 6233 cik_enable_lbpw(rdev, false);
@@ -6348,11 +6661,10 @@ static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
6348 6661
6349void cik_init_cp_pg_table(struct radeon_device *rdev) 6662void cik_init_cp_pg_table(struct radeon_device *rdev)
6350{ 6663{
6351 const __be32 *fw_data;
6352 volatile u32 *dst_ptr; 6664 volatile u32 *dst_ptr;
6353 int me, i, max_me = 4; 6665 int me, i, max_me = 4;
6354 u32 bo_offset = 0; 6666 u32 bo_offset = 0;
6355 u32 table_offset; 6667 u32 table_offset, table_size;
6356 6668
6357 if (rdev->family == CHIP_KAVERI) 6669 if (rdev->family == CHIP_KAVERI)
6358 max_me = 5; 6670 max_me = 5;
@@ -6363,24 +6675,71 @@ void cik_init_cp_pg_table(struct radeon_device *rdev)
6363 /* write the cp table buffer */ 6675 /* write the cp table buffer */
6364 dst_ptr = rdev->rlc.cp_table_ptr; 6676 dst_ptr = rdev->rlc.cp_table_ptr;
6365 for (me = 0; me < max_me; me++) { 6677 for (me = 0; me < max_me; me++) {
6366 if (me == 0) { 6678 if (rdev->new_fw) {
6367 fw_data = (const __be32 *)rdev->ce_fw->data; 6679 const __le32 *fw_data;
6368 table_offset = CP_ME_TABLE_OFFSET; 6680 const struct gfx_firmware_header_v1_0 *hdr;
6369 } else if (me == 1) { 6681
6370 fw_data = (const __be32 *)rdev->pfp_fw->data; 6682 if (me == 0) {
6371 table_offset = CP_ME_TABLE_OFFSET; 6683 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
6372 } else if (me == 2) { 6684 fw_data = (const __le32 *)
6373 fw_data = (const __be32 *)rdev->me_fw->data; 6685 (rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6374 table_offset = CP_ME_TABLE_OFFSET; 6686 table_offset = le32_to_cpu(hdr->jt_offset);
6687 table_size = le32_to_cpu(hdr->jt_size);
6688 } else if (me == 1) {
6689 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
6690 fw_data = (const __le32 *)
6691 (rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6692 table_offset = le32_to_cpu(hdr->jt_offset);
6693 table_size = le32_to_cpu(hdr->jt_size);
6694 } else if (me == 2) {
6695 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
6696 fw_data = (const __le32 *)
6697 (rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6698 table_offset = le32_to_cpu(hdr->jt_offset);
6699 table_size = le32_to_cpu(hdr->jt_size);
6700 } else if (me == 3) {
6701 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
6702 fw_data = (const __le32 *)
6703 (rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6704 table_offset = le32_to_cpu(hdr->jt_offset);
6705 table_size = le32_to_cpu(hdr->jt_size);
6706 } else {
6707 hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
6708 fw_data = (const __le32 *)
6709 (rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
6710 table_offset = le32_to_cpu(hdr->jt_offset);
6711 table_size = le32_to_cpu(hdr->jt_size);
6712 }
6713
6714 for (i = 0; i < table_size; i ++) {
6715 dst_ptr[bo_offset + i] =
6716 cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
6717 }
6718 bo_offset += table_size;
6375 } else { 6719 } else {
6376 fw_data = (const __be32 *)rdev->mec_fw->data; 6720 const __be32 *fw_data;
6377 table_offset = CP_MEC_TABLE_OFFSET; 6721 table_size = CP_ME_TABLE_SIZE;
6378 } 6722
6723 if (me == 0) {
6724 fw_data = (const __be32 *)rdev->ce_fw->data;
6725 table_offset = CP_ME_TABLE_OFFSET;
6726 } else if (me == 1) {
6727 fw_data = (const __be32 *)rdev->pfp_fw->data;
6728 table_offset = CP_ME_TABLE_OFFSET;
6729 } else if (me == 2) {
6730 fw_data = (const __be32 *)rdev->me_fw->data;
6731 table_offset = CP_ME_TABLE_OFFSET;
6732 } else {
6733 fw_data = (const __be32 *)rdev->mec_fw->data;
6734 table_offset = CP_MEC_TABLE_OFFSET;
6735 }
6379 6736
6380 for (i = 0; i < CP_ME_TABLE_SIZE; i ++) { 6737 for (i = 0; i < table_size; i ++) {
6381 dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i])); 6738 dst_ptr[bo_offset + i] =
6739 cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
6740 }
6741 bo_offset += table_size;
6382 } 6742 }
6383 bo_offset += CP_ME_TABLE_SIZE;
6384 } 6743 }
6385} 6744}
6386 6745
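The new-format branch of cik_init_cp_pg_table no longer hardcodes CP_ME_TABLE_OFFSET/CP_ME_TABLE_SIZE; each blob's header carries its own jump-table location. Per microengine the copy boils down to the following sketch (pick_hdr_for() is a hypothetical stand-in for the if/else ladder above; hdr points at the start of the blob, as in the hunk):

	const struct gfx_firmware_header_v1_0 *hdr = pick_hdr_for(me);	/* hypothetical */
	const __le32 *fw_data = (const __le32 *)
		((const u8 *)hdr + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	u32 table_offset = le32_to_cpu(hdr->jt_offset);
	u32 table_size = le32_to_cpu(hdr->jt_size);

	for (i = 0; i < table_size; i++)
		dst_ptr[bo_offset + i] =
			cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
	bo_offset += table_size;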
@@ -7618,7 +7977,8 @@ restart_ih:
7618 case 16: /* D5 page flip */ 7977 case 16: /* D5 page flip */
7619 case 18: /* D6 page flip */ 7978 case 18: /* D6 page flip */
7620 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); 7979 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
7621 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); 7980 if (radeon_use_pflipirq > 0)
7981 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
7622 break; 7982 break;
7623 case 42: /* HPD hotplug */ 7983 case 42: /* HPD hotplug */
7624 switch (src_data) { 7984 switch (src_data) {
@@ -7900,6 +8260,7 @@ restart_ih:
7900static int cik_startup(struct radeon_device *rdev) 8260static int cik_startup(struct radeon_device *rdev)
7901{ 8261{
7902 struct radeon_ring *ring; 8262 struct radeon_ring *ring;
8263 u32 nop;
7903 int r; 8264 int r;
7904 8265
7905 /* enable pcie gen2/3 link */ 8266 /* enable pcie gen2/3 link */
@@ -8033,9 +8394,18 @@ static int cik_startup(struct radeon_device *rdev)
8033 } 8394 }
8034 cik_irq_set(rdev); 8395 cik_irq_set(rdev);
8035 8396
8397 if (rdev->family == CHIP_HAWAII) {
8398 if (rdev->new_fw)
8399 nop = PACKET3(PACKET3_NOP, 0x3FFF);
8400 else
8401 nop = RADEON_CP_PACKET2;
8402 } else {
8403 nop = PACKET3(PACKET3_NOP, 0x3FFF);
8404 }
8405
8036 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 8406 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
8037 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 8407 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
8038 PACKET3(PACKET3_NOP, 0x3FFF)); 8408 nop);
8039 if (r) 8409 if (r)
8040 return r; 8410 return r;
8041 8411
@@ -8043,7 +8413,7 @@ static int cik_startup(struct radeon_device *rdev)
8043 /* type-2 packets are deprecated on MEC, use type-3 instead */ 8413 /* type-2 packets are deprecated on MEC, use type-3 instead */
8044 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; 8414 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
8045 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, 8415 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
8046 PACKET3(PACKET3_NOP, 0x3FFF)); 8416 nop);
8047 if (r) 8417 if (r)
8048 return r; 8418 return r;
8049 ring->me = 1; /* first MEC */ 8419 ring->me = 1; /* first MEC */
@@ -8054,7 +8424,7 @@ static int cik_startup(struct radeon_device *rdev)
8054 /* type-2 packets are deprecated on MEC, use type-3 instead */ 8424 /* type-2 packets are deprecated on MEC, use type-3 instead */
8055 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; 8425 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
8056 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, 8426 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
8057 PACKET3(PACKET3_NOP, 0x3FFF)); 8427 nop);
8058 if (r) 8428 if (r)
8059 return r; 8429 return r;
8060 /* dGPU only have 1 MEC */ 8430 /* dGPU only have 1 MEC */
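The nop variable in cik_startup captures a quirk: Hawaii running legacy microcode keeps type-2 (PACKET2) ring padding, apparently because that microcode predates type-3 NOP padding, while new-format Hawaii firmware and all other CIK parts pad with type-3 NOPs. The selection condenses to (sketch):

	u32 nop = (rdev->family == CHIP_HAWAII && !rdev->new_fw) ?
		  RADEON_CP_PACKET2 : PACKET3(PACKET3_NOP, 0x3FFF);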
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 8e9d0f1d858e..bcf480510ac2 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -24,6 +24,7 @@
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_ucode.h"
27#include "radeon_asic.h" 28#include "radeon_asic.h"
28#include "radeon_trace.h" 29#include "radeon_trace.h"
29#include "cikd.h" 30#include "cikd.h"
@@ -118,6 +119,7 @@ void cik_sdma_set_wptr(struct radeon_device *rdev,
118 reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET; 119 reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
119 120
120 WREG32(reg, (ring->wptr << 2) & 0x3fffc); 121 WREG32(reg, (ring->wptr << 2) & 0x3fffc);
122 (void)RREG32(reg);
121} 123}
122 124
123/** 125/**
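The added (void)RREG32(reg) after updating the write pointer is the standard posted-write flush: MMIO writes can sit buffered on the PCIe path, and a read from the same device forces them to complete before the SDMA engine is expected to see the new wptr. In isolation:

	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
	(void)RREG32(reg);	/* read back: flush the posted MMIO write */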
@@ -419,7 +421,6 @@ static int cik_sdma_rlc_resume(struct radeon_device *rdev)
419 */ 421 */
420static int cik_sdma_load_microcode(struct radeon_device *rdev) 422static int cik_sdma_load_microcode(struct radeon_device *rdev)
421{ 423{
422 const __be32 *fw_data;
423 int i; 424 int i;
424 425
425 if (!rdev->sdma_fw) 426 if (!rdev->sdma_fw)
@@ -428,19 +429,48 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
428 /* halt the MEs */ 429 /* halt the MEs */
429 cik_sdma_enable(rdev, false); 430 cik_sdma_enable(rdev, false);
430 431
431 /* sdma0 */ 432 if (rdev->new_fw) {
432 fw_data = (const __be32 *)rdev->sdma_fw->data; 433 const struct sdma_firmware_header_v1_0 *hdr =
433 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); 434 (const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
434 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) 435 const __le32 *fw_data;
435 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++)); 436 u32 fw_size;
436 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); 437
437 438 radeon_ucode_print_sdma_hdr(&hdr->header);
438 /* sdma1 */ 439
439 fw_data = (const __be32 *)rdev->sdma_fw->data; 440 /* sdma0 */
440 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); 441 fw_data = (const __le32 *)
441 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) 442 (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
442 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++)); 443 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
443 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); 444 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
445 for (i = 0; i < fw_size; i++)
446 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
447 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
448
449 /* sdma1 */
450 fw_data = (const __le32 *)
451 (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
452 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
453 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
454 for (i = 0; i < fw_size; i++)
455 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
456 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
457 } else {
458 const __be32 *fw_data;
459
460 /* sdma0 */
461 fw_data = (const __be32 *)rdev->sdma_fw->data;
462 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
463 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
464 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
465 WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
466
467 /* sdma1 */
468 fw_data = (const __be32 *)rdev->sdma_fw->data;
469 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
470 for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
471 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
472 WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
473 }
444 474
445 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); 475 WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
446 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); 476 WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
@@ -719,7 +749,43 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
719} 749}
720 750
721/** 751/**
722 * cik_sdma_vm_set_page - update the page tables using sDMA 752 * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
753 *
754 * @rdev: radeon_device pointer
755 * @ib: indirect buffer to fill with commands
756 * @pe: addr of the page entry
757 * @src: src addr to copy from
758 * @count: number of page entries to update
759 *
760 * Update PTEs by copying them from the GART using sDMA (CIK).
761 */
762void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
763 struct radeon_ib *ib,
764 uint64_t pe, uint64_t src,
765 unsigned count)
766{
767 while (count) {
768 unsigned bytes = count * 8;
769 if (bytes > 0x1FFFF8)
770 bytes = 0x1FFFF8;
771
772 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
773 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
774 ib->ptr[ib->length_dw++] = bytes;
775 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
776 ib->ptr[ib->length_dw++] = lower_32_bits(src);
777 ib->ptr[ib->length_dw++] = upper_32_bits(src);
778 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
779 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
780
781 pe += bytes;
782 src += bytes;
783 count -= bytes / 8;
784 }
785}
786
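cik_sdma_vm_copy_pages clamps each COPY packet to 0x1FFFF8 bytes (just under 2 MiB, kept 8-byte aligned for whole PTEs) and loops until all entries are queued. Stripped of the packet encoding, the chunking is (sketch; emit_copy() is a hypothetical stand-in for the seven-dword packet above):

	while (count) {
		unsigned bytes = count * 8;	/* 8 bytes per PTE */

		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;
		emit_copy(ib, pe, src, bytes);	/* hypothetical emitter */
		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}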
787/**
788 * cik_sdma_vm_write_pages - update PTEs by writing them manually
723 * 789 *
724 * @rdev: radeon_device pointer 790 * @rdev: radeon_device pointer
725 * @ib: indirect buffer to fill with commands 791 * @ib: indirect buffer to fill with commands
@@ -729,84 +795,103 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
729 * @incr: increase next addr by incr bytes 795 * @incr: increase next addr by incr bytes
730 * @flags: access flags 796 * @flags: access flags
731 * 797 *
732 * Update the page tables using sDMA (CIK). 798 * Update PTEs by writing them manually using sDMA (CIK).
733 */ 799 */
734void cik_sdma_vm_set_page(struct radeon_device *rdev, 800void cik_sdma_vm_write_pages(struct radeon_device *rdev,
735 struct radeon_ib *ib, 801 struct radeon_ib *ib,
736 uint64_t pe, 802 uint64_t pe,
737 uint64_t addr, unsigned count, 803 uint64_t addr, unsigned count,
738 uint32_t incr, uint32_t flags) 804 uint32_t incr, uint32_t flags)
739{ 805{
740 uint64_t value; 806 uint64_t value;
741 unsigned ndw; 807 unsigned ndw;
742 808
743 trace_radeon_vm_set_page(pe, addr, count, incr, flags); 809 while (count) {
744 810 ndw = count * 2;
745 if (flags == R600_PTE_GART) { 811 if (ndw > 0xFFFFE)
746 uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; 812 ndw = 0xFFFFE;
747 while (count) { 813
748 unsigned bytes = count * 8; 814 /* for non-physically contiguous pages (system) */
749 if (bytes > 0x1FFFF8) 815 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
750 bytes = 0x1FFFF8; 816 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
751 817 ib->ptr[ib->length_dw++] = pe;
752 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); 818 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
753 ib->ptr[ib->length_dw++] = bytes; 819 ib->ptr[ib->length_dw++] = ndw;
754 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ 820 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
755 ib->ptr[ib->length_dw++] = lower_32_bits(src); 821 if (flags & R600_PTE_SYSTEM) {
756 ib->ptr[ib->length_dw++] = upper_32_bits(src);
757 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
758 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
759
760 pe += bytes;
761 src += bytes;
762 count -= bytes / 8;
763 }
764 } else if (flags & R600_PTE_SYSTEM) {
765 while (count) {
766 ndw = count * 2;
767 if (ndw > 0xFFFFE)
768 ndw = 0xFFFFE;
769
770 /* for non-physically contiguous pages (system) */
771 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
772 ib->ptr[ib->length_dw++] = pe;
773 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
774 ib->ptr[ib->length_dw++] = ndw;
775 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
776 value = radeon_vm_map_gart(rdev, addr); 822 value = radeon_vm_map_gart(rdev, addr);
777 value &= 0xFFFFFFFFFFFFF000ULL; 823 value &= 0xFFFFFFFFFFFFF000ULL;
778 addr += incr; 824 } else if (flags & R600_PTE_VALID) {
779 value |= flags;
780 ib->ptr[ib->length_dw++] = value;
781 ib->ptr[ib->length_dw++] = upper_32_bits(value);
782 }
783 }
784 } else {
785 while (count) {
786 ndw = count;
787 if (ndw > 0x7FFFF)
788 ndw = 0x7FFFF;
789
790 if (flags & R600_PTE_VALID)
791 value = addr; 825 value = addr;
792 else 826 } else {
793 value = 0; 827 value = 0;
794 /* for physically contiguous pages (vram) */ 828 }
795 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); 829 addr += incr;
796 ib->ptr[ib->length_dw++] = pe; /* dst addr */ 830 value |= flags;
797 ib->ptr[ib->length_dw++] = upper_32_bits(pe); 831 ib->ptr[ib->length_dw++] = value;
798 ib->ptr[ib->length_dw++] = flags; /* mask */
799 ib->ptr[ib->length_dw++] = 0;
800 ib->ptr[ib->length_dw++] = value; /* value */
801 ib->ptr[ib->length_dw++] = upper_32_bits(value); 832 ib->ptr[ib->length_dw++] = upper_32_bits(value);
802 ib->ptr[ib->length_dw++] = incr; /* increment size */
803 ib->ptr[ib->length_dw++] = 0;
804 ib->ptr[ib->length_dw++] = ndw; /* number of entries */
805 pe += ndw * 8;
806 addr += ndw * incr;
807 count -= ndw;
808 } 833 }
809 } 834 }
835}
836
837/**
838 * cik_sdma_vm_set_pages - update the page tables using sDMA
839 *
840 * @rdev: radeon_device pointer
841 * @ib: indirect buffer to fill with commands
842 * @pe: addr of the page entry
843 * @addr: dst addr to write into pe
844 * @count: number of page entries to update
845 * @incr: increase next addr by incr bytes
846 * @flags: access flags
847 *
848 * Update the page tables using sDMA (CIK).
849 */
850void cik_sdma_vm_set_pages(struct radeon_device *rdev,
851 struct radeon_ib *ib,
852 uint64_t pe,
853 uint64_t addr, unsigned count,
854 uint32_t incr, uint32_t flags)
855{
856 uint64_t value;
857 unsigned ndw;
858
859 while (count) {
860 ndw = count;
861 if (ndw > 0x7FFFF)
862 ndw = 0x7FFFF;
863
864 if (flags & R600_PTE_VALID)
865 value = addr;
866 else
867 value = 0;
868
869 /* for physically contiguous pages (vram) */
870 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
871 ib->ptr[ib->length_dw++] = pe; /* dst addr */
872 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
873 ib->ptr[ib->length_dw++] = flags; /* mask */
874 ib->ptr[ib->length_dw++] = 0;
875 ib->ptr[ib->length_dw++] = value; /* value */
876 ib->ptr[ib->length_dw++] = upper_32_bits(value);
877 ib->ptr[ib->length_dw++] = incr; /* increment size */
878 ib->ptr[ib->length_dw++] = 0;
879 ib->ptr[ib->length_dw++] = ndw; /* number of entries */
880
881 pe += ndw * 8;
882 addr += ndw * incr;
883 count -= ndw;
884 }
885}
886
887/**
888 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
889 *
890 * @ib: indirect buffer to fill with padding
891 *
892 */
893void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
894{
810 while (ib->length_dw & 0x7) 895 while (ib->length_dw & 0x7)
811 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0); 896 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
812} 897}
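The old cik_sdma_vm_set_page chose among three update strategies internally; the split into copy/write/set functions (plus pad_ib) moves that policy to the caller. The intended dispatch looks roughly like this (hypothetical condensation; the real selection lives in the generic radeon_vm code):

	if (src_is_gart)			/* hypothetical predicate */
		cik_sdma_vm_copy_pages(rdev, ib, pe, src, count);
	else if (flags & R600_PTE_SYSTEM)
		cik_sdma_vm_write_pages(rdev, ib, pe, addr, count, incr, flags);
	else
		cik_sdma_vm_set_pages(rdev, ib, pe, addr, count, incr, flags);
	cik_sdma_vm_pad_ib(ib);		/* IB must end on an 8-dword boundary */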
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 0a65dc7e93e7..ab29f953a767 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -136,13 +136,13 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
136 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | 136 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
137 AUDIO_LIPSYNC(connector->audio_latency[1]); 137 AUDIO_LIPSYNC(connector->audio_latency[1]);
138 else 138 else
139 tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255); 139 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
140 } else { 140 } else {
141 if (connector->latency_present[0]) 141 if (connector->latency_present[0])
142 tmp = VIDEO_LIPSYNC(connector->video_latency[0]) | 142 tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
143 AUDIO_LIPSYNC(connector->audio_latency[0]); 143 AUDIO_LIPSYNC(connector->audio_latency[0]);
144 else 144 else
145 tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255); 145 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
146 } 146 }
147 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); 147 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
148} 148}
@@ -164,8 +164,10 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
164 offset = dig->afmt->pin->offset; 164 offset = dig->afmt->pin->offset;
165 165
166 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 166 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
167 if (connector->encoder == encoder) 167 if (connector->encoder == encoder) {
168 radeon_connector = to_radeon_connector(connector); 168 radeon_connector = to_radeon_connector(connector);
169 break;
170 }
169 } 171 }
170 172
171 if (!radeon_connector) { 173 if (!radeon_connector) {
@@ -173,7 +175,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
173 return; 175 return;
174 } 176 }
175 177
176 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 178 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
177 if (sad_count <= 0) { 179 if (sad_count <= 0) {
178 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 180 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
179 return; 181 return;
@@ -225,8 +227,10 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
225 offset = dig->afmt->pin->offset; 227 offset = dig->afmt->pin->offset;
226 228
227 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 229 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
228 if (connector->encoder == encoder) 230 if (connector->encoder == encoder) {
229 radeon_connector = to_radeon_connector(connector); 231 radeon_connector = to_radeon_connector(connector);
232 break;
233 }
230 } 234 }
231 235
232 if (!radeon_connector) { 236 if (!radeon_connector) {
@@ -234,7 +238,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
234 return; 238 return;
235 } 239 }
236 240
237 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 241 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
238 if (sad_count <= 0) { 242 if (sad_count <= 0) {
239 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 243 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
240 return; 244 return;
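The added braces and break in dce6_afmt make the loop stop at the first connector bound to this encoder instead of scanning the whole list (and, with multiple matches, taking the last one). The fixed shape, in isolation:

	struct radeon_connector *radeon_connector = NULL;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			radeon_connector = to_radeon_connector(connector);
			break;	/* first match wins; no need to walk further */
		}
	}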
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 15e4f28015e1..4fedd14e670a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2424,7 +2424,6 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
2424 r = radeon_gart_table_vram_pin(rdev); 2424 r = radeon_gart_table_vram_pin(rdev);
2425 if (r) 2425 if (r)
2426 return r; 2426 return r;
2427 radeon_gart_restore(rdev);
2428 /* Setup L2 cache */ 2427 /* Setup L2 cache */
2429 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 2428 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
2430 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 2429 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -2677,7 +2676,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
2677 if (save->crtc_enabled[i]) { 2676 if (save->crtc_enabled[i]) {
2678 if (ASIC_IS_DCE6(rdev)) { 2677 if (ASIC_IS_DCE6(rdev)) {
2679 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 2678 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
2680 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 2679 tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
2681 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2680 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2682 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 2681 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
2683 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 2682 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
@@ -4023,7 +4022,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
4023 /* save restore block */ 4022 /* save restore block */
4024 if (rdev->rlc.save_restore_obj == NULL) { 4023 if (rdev->rlc.save_restore_obj == NULL) {
4025 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 4024 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
4026 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj); 4025 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
4026 &rdev->rlc.save_restore_obj);
4027 if (r) { 4027 if (r) {
4028 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); 4028 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
4029 return r; 4029 return r;
@@ -4101,7 +4101,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
4101 4101
4102 if (rdev->rlc.clear_state_obj == NULL) { 4102 if (rdev->rlc.clear_state_obj == NULL) {
4103 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 4103 r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
4104 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); 4104 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
4105 &rdev->rlc.clear_state_obj);
4105 if (r) { 4106 if (r) {
4106 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); 4107 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
4107 sumo_rlc_fini(rdev); 4108 sumo_rlc_fini(rdev);
@@ -4175,8 +4176,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
4175 4176
4176 if (rdev->rlc.cp_table_size) { 4177 if (rdev->rlc.cp_table_size) {
4177 if (rdev->rlc.cp_table_obj == NULL) { 4178 if (rdev->rlc.cp_table_obj == NULL) {
4178 r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true, 4179 r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
4179 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj); 4180 PAGE_SIZE, true,
4181 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
4182 &rdev->rlc.cp_table_obj);
4180 if (r) { 4183 if (r) {
4181 dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r); 4184 dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
4182 sumo_rlc_fini(rdev); 4185 sumo_rlc_fini(rdev);
@@ -4961,7 +4964,8 @@ restart_ih:
4961 case 16: /* D5 page flip */ 4964 case 16: /* D5 page flip */
4962 case 18: /* D6 page flip */ 4965 case 18: /* D6 page flip */
4963 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); 4966 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
4964 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); 4967 if (radeon_use_pflipirq > 0)
4968 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
4965 break; 4969 break;
4966 case 42: /* HPD hotplug */ 4970 case 42: /* HPD hotplug */
4967 switch (src_data) { 4971 switch (src_data) {
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 1ec0e6e83f9f..278c7a139d74 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -117,7 +117,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
117 return; 117 return;
118 } 118 }
119 119
120 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 120 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
121 if (sad_count <= 0) { 121 if (sad_count <= 0) {
122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
123 return; 123 return;
@@ -172,7 +172,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
172 return; 172 return;
173 } 173 }
174 174
175 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 175 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
176 if (sad_count <= 0) { 176 if (sad_count <= 0) {
177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
178 return; 178 return;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 5a33ca681867..327b85f7fd0d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1229,7 +1229,6 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
1229 r = radeon_gart_table_vram_pin(rdev); 1229 r = radeon_gart_table_vram_pin(rdev);
1230 if (r) 1230 if (r)
1231 return r; 1231 return r;
1232 radeon_gart_restore(rdev);
1233 /* Setup TLB control */ 1232 /* Setup TLB control */
1234 WREG32(MC_VM_MX_L1_TLB_CNTL, 1233 WREG32(MC_VM_MX_L1_TLB_CNTL,
1235 (0xA << 7) | 1234 (0xA << 7) |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 6378e0276691..8a3e6221cece 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
307} 307}
308 308
309/** 309/**
310 * cayman_dma_vm_set_page - update the page tables using the DMA 310 * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
311 *
312 * @rdev: radeon_device pointer
313 * @ib: indirect buffer to fill with commands
314 * @pe: addr of the page entry
315 * @src: src addr where to copy from
316 * @count: number of page entries to update
317 *
318 * Update PTEs by copying them from the GART using the DMA (cayman/TN).
319 */
320void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
321 struct radeon_ib *ib,
322 uint64_t pe, uint64_t src,
323 unsigned count)
324{
325 unsigned ndw;
326
327 while (count) {
328 ndw = count * 2;
329 if (ndw > 0xFFFFE)
330 ndw = 0xFFFFE;
331
332 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
333 0, 0, ndw);
334 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
335 ib->ptr[ib->length_dw++] = lower_32_bits(src);
336 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
337 ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
338
339 pe += ndw * 4;
340 src += ndw * 4;
341 count -= ndw / 2;
342 }
343}
344
345/**
346 * cayman_dma_vm_write_pages - update PTEs by writing them manually
311 * 347 *
312 * @rdev: radeon_device pointer 348 * @rdev: radeon_device pointer
313 * @ib: indirect buffer to fill with commands 349 * @ib: indirect buffer to fill with commands
@@ -315,71 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
315 * @addr: dst addr to write into pe 351 * @addr: dst addr to write into pe
316 * @count: number of page entries to update 352 * @count: number of page entries to update
317 * @incr: increase next addr by incr bytes 353 * @incr: increase next addr by incr bytes
318 * @flags: hw access flags 354 * @flags: hw access flags
319 * 355 *
320 * Update the page tables using the DMA (cayman/TN). 356 * Update PTEs by writing them manually using the DMA (cayman/TN).
321 */ 357 */
322void cayman_dma_vm_set_page(struct radeon_device *rdev, 358void cayman_dma_vm_write_pages(struct radeon_device *rdev,
323 struct radeon_ib *ib, 359 struct radeon_ib *ib,
324 uint64_t pe, 360 uint64_t pe,
325 uint64_t addr, unsigned count, 361 uint64_t addr, unsigned count,
326 uint32_t incr, uint32_t flags) 362 uint32_t incr, uint32_t flags)
327{ 363{
328 uint64_t value; 364 uint64_t value;
329 unsigned ndw; 365 unsigned ndw;
330 366
331 trace_radeon_vm_set_page(pe, addr, count, incr, flags); 367 while (count) {
332 368 ndw = count * 2;
333 if ((flags & R600_PTE_SYSTEM) || (count == 1)) { 369 if (ndw > 0xFFFFE)
334 while (count) { 370 ndw = 0xFFFFE;
335 ndw = count * 2; 371
336 if (ndw > 0xFFFFE) 372 /* for non-physically contiguous pages (system) */
337 ndw = 0xFFFFE; 373 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
338 374 0, 0, ndw);
339 /* for non-physically contiguous pages (system) */ 375 ib->ptr[ib->length_dw++] = pe;
340 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw); 376 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
341 ib->ptr[ib->length_dw++] = pe; 377 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
342 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 378 if (flags & R600_PTE_SYSTEM) {
343 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 379 value = radeon_vm_map_gart(rdev, addr);
344 if (flags & R600_PTE_SYSTEM) { 380 value &= 0xFFFFFFFFFFFFF000ULL;
345 value = radeon_vm_map_gart(rdev, addr); 381 } else if (flags & R600_PTE_VALID) {
346 value &= 0xFFFFFFFFFFFFF000ULL;
347 } else if (flags & R600_PTE_VALID) {
348 value = addr;
349 } else {
350 value = 0;
351 }
352 addr += incr;
353 value |= flags;
354 ib->ptr[ib->length_dw++] = value;
355 ib->ptr[ib->length_dw++] = upper_32_bits(value);
356 }
357 }
358 } else {
359 while (count) {
360 ndw = count * 2;
361 if (ndw > 0xFFFFE)
362 ndw = 0xFFFFE;
363
364 if (flags & R600_PTE_VALID)
365 value = addr; 382 value = addr;
366 else 383 } else {
367 value = 0; 384 value = 0;
368 /* for physically contiguous pages (vram) */ 385 }
369 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); 386 addr += incr;
370 ib->ptr[ib->length_dw++] = pe; /* dst addr */ 387 value |= flags;
371 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 388 ib->ptr[ib->length_dw++] = value;
372 ib->ptr[ib->length_dw++] = flags; /* mask */
373 ib->ptr[ib->length_dw++] = 0;
374 ib->ptr[ib->length_dw++] = value; /* value */
375 ib->ptr[ib->length_dw++] = upper_32_bits(value); 389 ib->ptr[ib->length_dw++] = upper_32_bits(value);
376 ib->ptr[ib->length_dw++] = incr; /* increment size */
377 ib->ptr[ib->length_dw++] = 0;
378 pe += ndw * 4;
379 addr += (ndw / 2) * incr;
380 count -= ndw / 2;
381 } 390 }
382 } 391 }
392}
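
[ Ed. note: the value selection in cayman_dma_vm_write_pages reduces to
  three cases. A standalone model (the GART lookup is stubbed out; the
  PTE bit values below match the driver's R600_PTE_* defines): ]

#include <stdint.h>

#define R600_PTE_VALID	(1 << 0)
#define R600_PTE_SYSTEM	(1 << 1)

static uint64_t stub_map_gart(uint64_t addr) { return addr; }

static uint64_t pte_value(uint64_t addr, uint32_t flags)
{
	uint64_t value;

	if (flags & R600_PTE_SYSTEM) {
		/* system pages are looked up through the GART */
		value = stub_map_gart(addr) & 0xFFFFFFFFFFFFF000ULL;
	} else if (flags & R600_PTE_VALID) {
		value = addr;		/* physically contiguous (vram) */
	} else {
		value = 0;		/* invalid entry */
	}
	return value | flags;		/* hw access flags in the low bits */
}
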
393
394/**
395 * cayman_dma_vm_set_pages - update the page tables using the DMA
396 *
397 * @rdev: radeon_device pointer
398 * @ib: indirect buffer to fill with commands
399 * @pe: addr of the page entry
400 * @addr: dst addr to write into pe
401 * @count: number of page entries to update
402 * @incr: increase next addr by incr bytes
403 * @flags: hw access flags
404 *
405 * Update the page tables using the DMA (cayman/TN).
406 */
407void cayman_dma_vm_set_pages(struct radeon_device *rdev,
408 struct radeon_ib *ib,
409 uint64_t pe,
410 uint64_t addr, unsigned count,
411 uint32_t incr, uint32_t flags)
412{
413 uint64_t value;
414 unsigned ndw;
415
416 while (count) {
417 ndw = count * 2;
418 if (ndw > 0xFFFFE)
419 ndw = 0xFFFFE;
420
421 if (flags & R600_PTE_VALID)
422 value = addr;
423 else
424 value = 0;
425
426 /* for physically contiguous pages (vram) */
427 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
428 ib->ptr[ib->length_dw++] = pe; /* dst addr */
429 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
430 ib->ptr[ib->length_dw++] = flags; /* mask */
431 ib->ptr[ib->length_dw++] = 0;
432 ib->ptr[ib->length_dw++] = value; /* value */
433 ib->ptr[ib->length_dw++] = upper_32_bits(value);
434 ib->ptr[ib->length_dw++] = incr; /* increment size */
435 ib->ptr[ib->length_dw++] = 0;
436
437 pe += ndw * 4;
438 addr += (ndw / 2) * incr;
439 count -= ndw / 2;
440 }
441}
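
[ Ed. note: the PTE_PDE path trades per-entry writes for one fixed-size
  packet that the engine expands on its own. Comparing the IB footprint
  of the two paths for a single chunk (dword counts read off the code
  above): ]

#include <stdio.h>

int main(void)
{
	unsigned count = 512;			/* PTEs in this chunk */

	/* write_pages: 3 header dwords + 2 dwords per PTE */
	unsigned write_cost = 3 + 2 * count;
	/* set_pages: fixed 9-dword PTE_PDE packet per chunk */
	unsigned set_cost = 9;

	printf("write=%u dw, set=%u dw for %u PTEs\n",
	       write_cost, set_cost, count);
	return 0;
}
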
442
443/**
444 * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
445 *
446 * @ib: indirect buffer to fill with padding
 447 * Pad the indirect buffer with NOPs to a multiple of 8 dwords.
 448 */
449void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
450{
383 while (ib->length_dw & 0x7) 451 while (ib->length_dw & 0x7)
384 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); 452 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
385} 453}
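
[ Ed. note: pad_ib rounds the IB up to an 8-dword boundary with NOP
  packets; a trivial standalone model of the alignment: ]

#include <assert.h>

static unsigned pad_to_8(unsigned length_dw)
{
	while (length_dw & 0x7)		/* DMA requires 8-dword alignment */
		length_dw++;		/* each slot would hold a NOP */
	return length_dw;
}

int main(void)
{
	assert(pad_to_8(13) == 16);
	assert(pad_to_8(16) == 16);	/* already aligned: unchanged */
	return 0;
}
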
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 1544efcf1c3a..04b5940b8923 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -652,7 +652,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
652{ 652{
653 uint32_t tmp; 653 uint32_t tmp;
654 654
655 radeon_gart_restore(rdev);
656 /* discard memory request outside of configured range */ 655 /* discard memory request outside of configured range */
657 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 656 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
658 WREG32(RADEON_AIC_CNTL, tmp); 657 WREG32(RADEON_AIC_CNTL, tmp);
@@ -683,7 +682,7 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
683} 682}
684 683
685void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 684void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
686 uint64_t addr) 685 uint64_t addr, uint32_t flags)
687{ 686{
688 u32 *gtt = rdev->gart.ptr; 687 u32 *gtt = rdev->gart.ptr;
689 gtt[i] = cpu_to_le32(lower_32_bits(addr)); 688 gtt[i] = cpu_to_le32(lower_32_bits(addr));
@@ -838,11 +837,7 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
838 /* Wait until IDLE & CLEAN */ 837 /* Wait until IDLE & CLEAN */
839 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); 838 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
840 radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); 839 radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
841 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 840 r100_ring_hdp_flush(rdev, ring);
842 radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
843 RADEON_HDP_READ_BUFFER_INVALIDATE);
844 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
845 radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
846 /* Emit fence sequence & fire IRQ */ 841 /* Emit fence sequence & fire IRQ */
847 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); 842 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
848 radeon_ring_write(ring, fence->seq); 843 radeon_ring_write(ring, fence->seq);
@@ -1061,6 +1056,20 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
1061 (void)RREG32(RADEON_CP_RB_WPTR); 1056 (void)RREG32(RADEON_CP_RB_WPTR);
1062} 1057}
1063 1058
1059/**
1060 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
 1060 * @rdev: radeon device structure
 1061 * @ring: ring buffer struct for emitting packets
1063 */
1064void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
1065{
1066 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
1067 radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
1068 RADEON_HDP_READ_BUFFER_INVALIDATE);
1069 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
1070 radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
1071}
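
[ Ed. note: the helper factors out the set-bit-then-restore sequence
  that r100_fence_ring_emit previously open-coded. The generic shape of
  that toggle, modelled standalone (all names here are hypothetical): ]

#include <stdint.h>

struct ring { uint32_t buf[64]; unsigned wptr; };

static void ring_write(struct ring *r, uint32_t v) { r->buf[r->wptr++] = v; }

static void toggle_flush(struct ring *r, uint32_t reg_pkt,
			 uint32_t saved_cntl, uint32_t invalidate_bit)
{
	ring_write(r, reg_pkt);				/* PACKET0(HOST_PATH_CNTL) */
	ring_write(r, saved_cntl | invalidate_bit);	/* kick the invalidate */
	ring_write(r, reg_pkt);
	ring_write(r, saved_cntl);			/* restore the saved value */
}
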
1072
1064static void r100_cp_load_microcode(struct radeon_device *rdev) 1073static void r100_cp_load_microcode(struct radeon_device *rdev)
1065{ 1074{
1066 const __be32 *fw_data; 1075 const __be32 *fw_data;
@@ -1401,7 +1410,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1401 */ 1410 */
1402int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) 1411int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1403{ 1412{
1404 struct drm_mode_object *obj;
1405 struct drm_crtc *crtc; 1413 struct drm_crtc *crtc;
1406 struct radeon_crtc *radeon_crtc; 1414 struct radeon_crtc *radeon_crtc;
1407 struct radeon_cs_packet p3reloc, waitreloc; 1415 struct radeon_cs_packet p3reloc, waitreloc;
@@ -1441,12 +1449,11 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1441 header = radeon_get_ib_value(p, h_idx); 1449 header = radeon_get_ib_value(p, h_idx);
1442 crtc_id = radeon_get_ib_value(p, h_idx + 5); 1450 crtc_id = radeon_get_ib_value(p, h_idx + 5);
1443 reg = R100_CP_PACKET0_GET_REG(header); 1451 reg = R100_CP_PACKET0_GET_REG(header);
1444 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 1452 crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
1445 if (!obj) { 1453 if (!crtc) {
1446 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1454 DRM_ERROR("cannot find crtc %d\n", crtc_id);
1447 return -ENOENT; 1455 return -ENOENT;
1448 } 1456 }
1449 crtc = obj_to_crtc(obj);
1450 radeon_crtc = to_radeon_crtc(crtc); 1457 radeon_crtc = to_radeon_crtc(crtc);
1451 crtc_id = radeon_crtc->crtc_id; 1458 crtc_id = radeon_crtc->crtc_id;
1452 1459
@@ -4067,39 +4074,6 @@ int r100_init(struct radeon_device *rdev)
4067 return 0; 4074 return 0;
4068} 4075}
4069 4076
4070uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
4071 bool always_indirect)
4072{
4073 if (reg < rdev->rmmio_size && !always_indirect)
4074 return readl(((void __iomem *)rdev->rmmio) + reg);
4075 else {
4076 unsigned long flags;
4077 uint32_t ret;
4078
4079 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4080 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4081 ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4082 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4083
4084 return ret;
4085 }
4086}
4087
4088void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
4089 bool always_indirect)
4090{
4091 if (reg < rdev->rmmio_size && !always_indirect)
4092 writel(v, ((void __iomem *)rdev->rmmio) + reg);
4093 else {
4094 unsigned long flags;
4095
4096 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4097 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4098 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4099 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4100 }
4101}
4102
4103u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) 4077u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
4104{ 4078{
4105 if (reg < rdev->rio_mem_size) 4079 if (reg < rdev->rio_mem_size)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3c21d77a483d..75b30338c226 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -69,17 +69,23 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
69 mb(); 69 mb();
70} 70}
71 71
72#define R300_PTE_UNSNOOPED (1 << 0)
72#define R300_PTE_WRITEABLE (1 << 2) 73#define R300_PTE_WRITEABLE (1 << 2)
73#define R300_PTE_READABLE (1 << 3) 74#define R300_PTE_READABLE (1 << 3)
74 75
75void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 76void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
76 uint64_t addr) 77 uint64_t addr, uint32_t flags)
77{ 78{
78 void __iomem *ptr = rdev->gart.ptr; 79 void __iomem *ptr = rdev->gart.ptr;
79 80
80 addr = (lower_32_bits(addr) >> 8) | 81 addr = (lower_32_bits(addr) >> 8) |
81 ((upper_32_bits(addr) & 0xff) << 24) | 82 ((upper_32_bits(addr) & 0xff) << 24);
82 R300_PTE_WRITEABLE | R300_PTE_READABLE; 83 if (flags & RADEON_GART_PAGE_READ)
84 addr |= R300_PTE_READABLE;
85 if (flags & RADEON_GART_PAGE_WRITE)
86 addr |= R300_PTE_WRITEABLE;
87 if (!(flags & RADEON_GART_PAGE_SNOOP))
88 addr |= R300_PTE_UNSNOOPED;
83 /* on x86 we want this to be CPU endian, on powerpc 89 /* on x86 we want this to be CPU endian, on powerpc
84 * on powerpc without HW swappers, it'll get swapped on way 90 * on powerpc without HW swappers, it'll get swapped on way
85 * into VRAM - so no need for cpu_to_le32 on VRAM tables */ 91 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
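
[ Ed. note: the rv370 PTE packs a 40-bit page address into 32 bits and
  now derives its permission bits from the generic GART flags instead
  of hardcoding read/write. A standalone model (flag values copied from
  the defines introduced in this series): ]

#include <stdint.h>

#define RADEON_GART_PAGE_READ	(1 << 1)
#define RADEON_GART_PAGE_WRITE	(1 << 2)
#define RADEON_GART_PAGE_SNOOP	(1 << 3)

#define R300_PTE_UNSNOOPED	(1 << 0)
#define R300_PTE_WRITEABLE	(1 << 2)
#define R300_PTE_READABLE	(1 << 3)

static uint32_t rv370_pte(uint64_t addr, uint32_t flags)
{
	uint32_t pte = ((uint32_t)addr >> 8) |
		       (((uint32_t)(addr >> 32) & 0xff) << 24);

	if (flags & RADEON_GART_PAGE_READ)
		pte |= R300_PTE_READABLE;
	if (flags & RADEON_GART_PAGE_WRITE)
		pte |= R300_PTE_WRITEABLE;
	if (!(flags & RADEON_GART_PAGE_SNOOP))
		pte |= R300_PTE_UNSNOOPED;	/* snooping is now opt-in */
	return pte;
}
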
@@ -120,7 +126,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
120 r = radeon_gart_table_vram_pin(rdev); 126 r = radeon_gart_table_vram_pin(rdev);
121 if (r) 127 if (r)
122 return r; 128 return r;
123 radeon_gart_restore(rdev);
124 /* discard memory request outside of configured range */ 129 /* discard memory request outside of configured range */
125 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 130 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
126 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); 131 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 3c69f58e46ef..c70a504d96af 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -968,7 +968,6 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
968 r = radeon_gart_table_vram_pin(rdev); 968 r = radeon_gart_table_vram_pin(rdev);
969 if (r) 969 if (r)
970 return r; 970 return r;
971 radeon_gart_restore(rdev);
972 971
973 /* Setup L2 cache */ 972 /* Setup L2 cache */
974 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 973 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -1339,7 +1338,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
1339 if (rdev->vram_scratch.robj == NULL) { 1338 if (rdev->vram_scratch.robj == NULL) {
1340 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, 1339 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1341 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 1340 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1342 NULL, &rdev->vram_scratch.robj); 1341 0, NULL, &rdev->vram_scratch.robj);
1343 if (r) { 1342 if (r) {
1344 return r; 1343 return r;
1345 } 1344 }
@@ -3227,7 +3226,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
3227 if (rdev->ih.ring_obj == NULL) { 3226 if (rdev->ih.ring_obj == NULL) {
3228 r = radeon_bo_create(rdev, rdev->ih.ring_size, 3227 r = radeon_bo_create(rdev, rdev->ih.ring_size,
3229 PAGE_SIZE, true, 3228 PAGE_SIZE, true,
3230 RADEON_GEM_DOMAIN_GTT, 3229 RADEON_GEM_DOMAIN_GTT, 0,
3231 NULL, &rdev->ih.ring_obj); 3230 NULL, &rdev->ih.ring_obj);
3232 if (r) { 3231 if (r) {
3233 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); 3232 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
@@ -3924,11 +3923,13 @@ restart_ih:
3924 break; 3923 break;
3925 case 9: /* D1 pflip */ 3924 case 9: /* D1 pflip */
3926 DRM_DEBUG("IH: D1 flip\n"); 3925 DRM_DEBUG("IH: D1 flip\n");
3927 radeon_crtc_handle_flip(rdev, 0); 3926 if (radeon_use_pflipirq > 0)
3927 radeon_crtc_handle_flip(rdev, 0);
3928 break; 3928 break;
3929 case 11: /* D2 pflip */ 3929 case 11: /* D2 pflip */
3930 DRM_DEBUG("IH: D2 flip\n"); 3930 DRM_DEBUG("IH: D2 flip\n");
3931 radeon_crtc_handle_flip(rdev, 1); 3931 if (radeon_use_pflipirq > 0)
3932 radeon_crtc_handle_flip(rdev, 1);
3932 break; 3933 break;
3933 case 19: /* HPD/DAC hotplug */ 3934 case 19: /* HPD/DAC hotplug */
3934 switch (src_data) { 3935 switch (src_data) {
@@ -4089,16 +4090,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
4089} 4090}
4090 4091
4091/** 4092/**
4092 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl 4093 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 4093 * rdev: radeon device structure 4094 * rdev: radeon device structure
4094 * bo: buffer object struct which userspace is waiting for idle
4095 * 4095 *
4096 * Some R6XX/R7XX doesn't seems to take into account HDP flush performed 4096 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
4097 * through ring buffer, this leads to corruption in rendering, see 4097 * through the ring buffer. This leads to corruption in rendering, see
4098 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we 4098 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
4099 * directly perform HDP flush by writing register through MMIO. 4099 * directly perform the HDP flush by writing the register through MMIO.
4100 */ 4100 */
4101void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) 4101void r600_mmio_hdp_flush(struct radeon_device *rdev)
4102{ 4102{
4103 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 4103 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
4104 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. 4104 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 12511bb5fd6f..c47537a1ddba 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -825,7 +825,6 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
825 uint32_t *vline_start_end, 825 uint32_t *vline_start_end,
826 uint32_t *vline_status) 826 uint32_t *vline_status)
827{ 827{
828 struct drm_mode_object *obj;
829 struct drm_crtc *crtc; 828 struct drm_crtc *crtc;
830 struct radeon_crtc *radeon_crtc; 829 struct radeon_crtc *radeon_crtc;
831 struct radeon_cs_packet p3reloc, wait_reg_mem; 830 struct radeon_cs_packet p3reloc, wait_reg_mem;
@@ -887,12 +886,11 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
887 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 886 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
888 reg = R600_CP_PACKET0_GET_REG(header); 887 reg = R600_CP_PACKET0_GET_REG(header);
889 888
890 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 889 crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
891 if (!obj) { 890 if (!crtc) {
892 DRM_ERROR("cannot find crtc %d\n", crtc_id); 891 DRM_ERROR("cannot find crtc %d\n", crtc_id);
893 return -ENOENT; 892 return -ENOENT;
894 } 893 }
895 crtc = obj_to_crtc(obj);
896 radeon_crtc = to_radeon_crtc(crtc); 894 radeon_crtc = to_radeon_crtc(crtc);
897 crtc_id = radeon_crtc->crtc_id; 895 crtc_id = radeon_crtc->crtc_id;
898 896
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 60c47f829122..9e1732eb402c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -64,6 +64,7 @@
64#include <linux/wait.h> 64#include <linux/wait.h>
65#include <linux/list.h> 65#include <linux/list.h>
66#include <linux/kref.h> 66#include <linux/kref.h>
67#include <linux/interval_tree.h>
67 68
68#include <ttm/ttm_bo_api.h> 69#include <ttm/ttm_bo_api.h>
69#include <ttm/ttm_bo_driver.h> 70#include <ttm/ttm_bo_driver.h>
@@ -103,6 +104,7 @@ extern int radeon_hard_reset;
103extern int radeon_vm_size; 104extern int radeon_vm_size;
104extern int radeon_vm_block_size; 105extern int radeon_vm_block_size;
105extern int radeon_deep_color; 106extern int radeon_deep_color;
107extern int radeon_use_pflipirq;
106 108
107/* 109/*
108 * Copy from radeon_drv.h so we don't have to include both and have conflicting 110 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -304,6 +306,9 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
304 u16 *vddc, u16 *vddci, 306 u16 *vddc, u16 *vddci,
305 u16 virtual_voltage_id, 307 u16 virtual_voltage_id,
306 u16 vbios_voltage_id); 308 u16 vbios_voltage_id);
309int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
310 u16 virtual_voltage_id,
311 u16 *voltage);
307int radeon_atom_round_to_true_voltage(struct radeon_device *rdev, 312int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
308 u8 voltage_type, 313 u8 voltage_type,
309 u16 nominal_voltage, 314 u16 nominal_voltage,
@@ -317,6 +322,9 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
317 struct atom_voltage_table *voltage_table); 322 struct atom_voltage_table *voltage_table);
318bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev, 323bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
319 u8 voltage_type, u8 voltage_mode); 324 u8 voltage_type, u8 voltage_mode);
325int radeon_atom_get_svi2_info(struct radeon_device *rdev,
326 u8 voltage_type,
327 u8 *svd_gpio_id, u8 *svc_gpio_id);
320void radeon_atom_update_memory_dll(struct radeon_device *rdev, 328void radeon_atom_update_memory_dll(struct radeon_device *rdev,
321 u32 mem_clock); 329 u32 mem_clock);
322void radeon_atom_set_ac_timing(struct radeon_device *rdev, 330void radeon_atom_set_ac_timing(struct radeon_device *rdev,
@@ -441,14 +449,12 @@ struct radeon_mman {
441struct radeon_bo_va { 449struct radeon_bo_va {
442 /* protected by bo being reserved */ 450 /* protected by bo being reserved */
443 struct list_head bo_list; 451 struct list_head bo_list;
444 uint64_t soffset;
445 uint64_t eoffset;
446 uint32_t flags; 452 uint32_t flags;
447 bool valid; 453 uint64_t addr;
448 unsigned ref_count; 454 unsigned ref_count;
449 455
450 /* protected by vm mutex */ 456 /* protected by vm mutex */
451 struct list_head vm_list; 457 struct interval_tree_node it;
452 struct list_head vm_status; 458 struct list_head vm_status;
453 459
454 /* constant after initialization */ 460 /* constant after initialization */
@@ -465,6 +471,7 @@ struct radeon_bo {
465 struct ttm_placement placement; 471 struct ttm_placement placement;
466 struct ttm_buffer_object tbo; 472 struct ttm_buffer_object tbo;
467 struct ttm_bo_kmap_obj kmap; 473 struct ttm_bo_kmap_obj kmap;
474 u32 flags;
468 unsigned pin_count; 475 unsigned pin_count;
469 void *kptr; 476 void *kptr;
470 u32 tiling_flags; 477 u32 tiling_flags;
@@ -543,9 +550,9 @@ struct radeon_gem {
543 550
544int radeon_gem_init(struct radeon_device *rdev); 551int radeon_gem_init(struct radeon_device *rdev);
545void radeon_gem_fini(struct radeon_device *rdev); 552void radeon_gem_fini(struct radeon_device *rdev);
546int radeon_gem_object_create(struct radeon_device *rdev, int size, 553int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
547 int alignment, int initial_domain, 554 int alignment, int initial_domain,
548 bool discardable, bool kernel, 555 u32 flags, bool kernel,
549 struct drm_gem_object **obj); 556 struct drm_gem_object **obj);
550 557
551int radeon_mode_dumb_create(struct drm_file *file_priv, 558int radeon_mode_dumb_create(struct drm_file *file_priv,
@@ -590,6 +597,12 @@ struct radeon_mc;
590#define RADEON_GPU_PAGE_SHIFT 12 597#define RADEON_GPU_PAGE_SHIFT 12
591#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK) 598#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
592 599
600#define RADEON_GART_PAGE_DUMMY 0
601#define RADEON_GART_PAGE_VALID (1 << 0)
602#define RADEON_GART_PAGE_READ (1 << 1)
603#define RADEON_GART_PAGE_WRITE (1 << 2)
604#define RADEON_GART_PAGE_SNOOP (1 << 3)
605
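
[ Ed. note: the new generic flags let callers of radeon_gart_bind()
  describe page attributes without knowing the per-asic PTE layout; a
  typical cacheable read/write mapping combines all four bits: ]

#include <stdint.h>

#define RADEON_GART_PAGE_VALID	(1 << 0)
#define RADEON_GART_PAGE_READ	(1 << 1)
#define RADEON_GART_PAGE_WRITE	(1 << 2)
#define RADEON_GART_PAGE_SNOOP	(1 << 3)

/* ordinary snooped, read/write system page */
static const uint32_t gart_rw_flags =
	RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
	RADEON_GART_PAGE_WRITE | RADEON_GART_PAGE_SNOOP;
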
593struct radeon_gart { 606struct radeon_gart {
594 dma_addr_t table_addr; 607 dma_addr_t table_addr;
595 struct radeon_bo *robj; 608 struct radeon_bo *robj;
@@ -614,8 +627,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
614 int pages); 627 int pages);
615int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, 628int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
616 int pages, struct page **pagelist, 629 int pages, struct page **pagelist,
617 dma_addr_t *dma_addr); 630 dma_addr_t *dma_addr, uint32_t flags);
618void radeon_gart_restore(struct radeon_device *rdev);
619 631
620 632
621/* 633/*
@@ -855,9 +867,9 @@ struct radeon_mec {
855#define R600_PTE_FRAG_64KB (4 << 7) 867#define R600_PTE_FRAG_64KB (4 << 7)
856#define R600_PTE_FRAG_256KB (6 << 7) 868#define R600_PTE_FRAG_256KB (6 << 7)
857 869
858/* flags used for GART page table entries on R600+ */ 870/* flags needed to be set so we can copy directly from the GART table */
859#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \ 871#define R600_PTE_GART_MASK ( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
860 | R600_PTE_READABLE | R600_PTE_WRITEABLE) 872 R600_PTE_SYSTEM | R600_PTE_VALID )
861 873
862struct radeon_vm_pt { 874struct radeon_vm_pt {
863 struct radeon_bo *bo; 875 struct radeon_bo *bo;
@@ -865,9 +877,12 @@ struct radeon_vm_pt {
865}; 877};
866 878
867struct radeon_vm { 879struct radeon_vm {
868 struct list_head va; 880 struct rb_root va;
869 unsigned id; 881 unsigned id;
870 882
883 /* BOs moved, but not yet updated in the PT */
884 struct list_head invalidated;
885
871 /* BOs freed, but not yet updated in the PT */ 886 /* BOs freed, but not yet updated in the PT */
872 struct list_head freed; 887 struct list_head freed;
873 888
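
[ Ed. note: with soffset/eoffset replaced by an interval_tree_node and
  the per-VM list by an rb_root, a mapping registers itself roughly as
  below. This is a sketch against <linux/interval_tree.h>; whether
  it.start/it.last are kept in bytes or GPU pages is an assumption
  here, not taken from the patch: ]

static void bo_va_map(struct radeon_vm *vm, struct radeon_bo_va *bo_va,
		      uint64_t soffset, uint64_t eoffset)
{
	bo_va->it.start = soffset;	/* first covered offset */
	bo_va->it.last = eoffset - 1;	/* interval trees use inclusive ends */
	interval_tree_insert(&bo_va->it, &vm->va);
}
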
@@ -1740,6 +1755,7 @@ struct radeon_asic_ring {
 1740 /* command emit functions */ 1755 /* command emit functions */
1741 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 1756 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
1742 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); 1757 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
1758 void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
1743 bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, 1759 bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
1744 struct radeon_semaphore *semaphore, bool emit_wait); 1760 struct radeon_semaphore *semaphore, bool emit_wait);
1745 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 1761 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1763,13 +1779,8 @@ struct radeon_asic {
1763 int (*suspend)(struct radeon_device *rdev); 1779 int (*suspend)(struct radeon_device *rdev);
1764 void (*vga_set_state)(struct radeon_device *rdev, bool state); 1780 void (*vga_set_state)(struct radeon_device *rdev, bool state);
1765 int (*asic_reset)(struct radeon_device *rdev); 1781 int (*asic_reset)(struct radeon_device *rdev);
1766 /* ioctl hw specific callback. Some hw might want to perform special 1782 /* Flush the HDP cache via MMIO */
1767 * operation on specific ioctl. For instance on wait idle some hw 1783 void (*mmio_hdp_flush)(struct radeon_device *rdev);
1768 * might want to perform and HDP flush through MMIO as it seems that
1769 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
1770 * through ring.
1771 */
1772 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
1773 /* check if 3D engine is idle */ 1784 /* check if 3D engine is idle */
1774 bool (*gui_idle)(struct radeon_device *rdev); 1785 bool (*gui_idle)(struct radeon_device *rdev);
1775 /* wait for mc_idle */ 1786 /* wait for mc_idle */
@@ -1782,16 +1793,26 @@ struct radeon_asic {
1782 struct { 1793 struct {
1783 void (*tlb_flush)(struct radeon_device *rdev); 1794 void (*tlb_flush)(struct radeon_device *rdev);
1784 void (*set_page)(struct radeon_device *rdev, unsigned i, 1795 void (*set_page)(struct radeon_device *rdev, unsigned i,
1785 uint64_t addr); 1796 uint64_t addr, uint32_t flags);
1786 } gart; 1797 } gart;
1787 struct { 1798 struct {
1788 int (*init)(struct radeon_device *rdev); 1799 int (*init)(struct radeon_device *rdev);
1789 void (*fini)(struct radeon_device *rdev); 1800 void (*fini)(struct radeon_device *rdev);
1790 void (*set_page)(struct radeon_device *rdev, 1801 void (*copy_pages)(struct radeon_device *rdev,
1791 struct radeon_ib *ib, 1802 struct radeon_ib *ib,
1792 uint64_t pe, 1803 uint64_t pe, uint64_t src,
1793 uint64_t addr, unsigned count, 1804 unsigned count);
1794 uint32_t incr, uint32_t flags); 1805 void (*write_pages)(struct radeon_device *rdev,
1806 struct radeon_ib *ib,
1807 uint64_t pe,
1808 uint64_t addr, unsigned count,
1809 uint32_t incr, uint32_t flags);
1810 void (*set_pages)(struct radeon_device *rdev,
1811 struct radeon_ib *ib,
1812 uint64_t pe,
1813 uint64_t addr, unsigned count,
1814 uint32_t incr, uint32_t flags);
1815 void (*pad_ib)(struct radeon_ib *ib);
1795 } vm; 1816 } vm;
1796 /* ring specific callbacks */ 1817 /* ring specific callbacks */
1797 struct radeon_asic_ring *ring[RADEON_NUM_RINGS]; 1818 struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2299,10 +2320,12 @@ struct radeon_device {
2299 const struct firmware *mc_fw; /* NI MC firmware */ 2320 const struct firmware *mc_fw; /* NI MC firmware */
2300 const struct firmware *ce_fw; /* SI CE firmware */ 2321 const struct firmware *ce_fw; /* SI CE firmware */
2301 const struct firmware *mec_fw; /* CIK MEC firmware */ 2322 const struct firmware *mec_fw; /* CIK MEC firmware */
2323 const struct firmware *mec2_fw; /* KV MEC2 firmware */
2302 const struct firmware *sdma_fw; /* CIK SDMA firmware */ 2324 const struct firmware *sdma_fw; /* CIK SDMA firmware */
2303 const struct firmware *smc_fw; /* SMC firmware */ 2325 const struct firmware *smc_fw; /* SMC firmware */
2304 const struct firmware *uvd_fw; /* UVD firmware */ 2326 const struct firmware *uvd_fw; /* UVD firmware */
2305 const struct firmware *vce_fw; /* VCE firmware */ 2327 const struct firmware *vce_fw; /* VCE firmware */
2328 bool new_fw;
2306 struct r600_vram_scratch vram_scratch; 2329 struct r600_vram_scratch vram_scratch;
2307 int msi_enabled; /* msi enabled */ 2330 int msi_enabled; /* msi enabled */
2308 struct r600_ih ih; /* r6/700 interrupt ring */ 2331 struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2342,6 +2365,11 @@ struct radeon_device {
2342 2365
2343 struct dev_pm_domain vga_pm_domain; 2366 struct dev_pm_domain vga_pm_domain;
2344 bool have_disp_power_ref; 2367 bool have_disp_power_ref;
2368 u32 px_quirk_flags;
2369
2370 /* tracking pinned memory */
2371 u64 vram_pin_size;
2372 u64 gart_pin_size;
2345}; 2373};
2346 2374
2347bool radeon_is_px(struct drm_device *dev); 2375bool radeon_is_px(struct drm_device *dev);
@@ -2352,10 +2380,42 @@ int radeon_device_init(struct radeon_device *rdev,
2352void radeon_device_fini(struct radeon_device *rdev); 2380void radeon_device_fini(struct radeon_device *rdev);
2353int radeon_gpu_wait_for_idle(struct radeon_device *rdev); 2381int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
2354 2382
2355uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, 2383#define RADEON_MIN_MMIO_SIZE 0x10000
2356 bool always_indirect); 2384
2357void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, 2385static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
2358 bool always_indirect); 2386 bool always_indirect)
2387{
 2388 /* The MMIO size is at least 64KB. This allows the if to be optimized out. */
2389 if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2390 return readl(((void __iomem *)rdev->rmmio) + reg);
2391 else {
2392 unsigned long flags;
2393 uint32_t ret;
2394
2395 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
2396 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
2397 ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
2398 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
2399
2400 return ret;
2401 }
2402}
2403
2404static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
2405 bool always_indirect)
2406{
2407 if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2408 writel(v, ((void __iomem *)rdev->rmmio) + reg);
2409 else {
2410 unsigned long flags;
2411
2412 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
2413 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
2414 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
2415 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
2416 }
2417}
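
[ Ed. note: moving the accessors inline lets a compile-time-constant
  reg below RADEON_MIN_MMIO_SIZE drop the indirect branch entirely.
  Hypothetical call sites (the 0x8010 offset is illustrative only): ]

u32 v = r100_mm_rreg(rdev, 0x8010, false);	/* direct readl() path */
r100_mm_wreg(rdev, 0x8010, v | 1, false);	/* direct writel() path */
(void)r100_mm_rreg(rdev, 0x8010, true);		/* forced MM_INDEX/MM_DATA */
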
2418
2359u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); 2419u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
2360void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2420void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2361 2421
@@ -2709,10 +2769,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2709#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 2769#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
2710#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 2770#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
2711#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) 2771#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
2712#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) 2772#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
2713#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) 2773#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
2714#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) 2774#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
2715#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags))) 2775#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
2776#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2777#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2778#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
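
[ Ed. note: the four macros dispatch the split page-table paths. A
  hypothetical composition -- not the actual radeon_vm.c logic -- showing
  when each callback applies; the GART-table src computation leans on
  the table_addr field visible in struct radeon_gart above: ]

static void update_ptes(struct radeon_device *rdev, struct radeon_ib *ib,
			uint64_t pe, uint64_t addr, unsigned count,
			uint32_t incr, uint32_t flags)
{
	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		/* entries identical to the GART table: copy them over */
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
	} else if (flags & R600_PTE_SYSTEM) {
		/* scattered system pages: write each PTE manually */
		radeon_asic_vm_write_pages(rdev, ib, pe, addr, count,
					   incr, flags);
	} else {
		/* contiguous vram: let the engine generate the run */
		radeon_asic_vm_set_pages(rdev, ib, pe, addr, count,
					 incr, flags);
	}
	radeon_asic_vm_pad_ib(rdev, ib);
}
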
2716#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp)) 2779#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
2717#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp)) 2780#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
2718#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp)) 2781#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
@@ -2840,6 +2903,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
2840 struct radeon_vm *vm); 2903 struct radeon_vm *vm);
2841int radeon_vm_clear_freed(struct radeon_device *rdev, 2904int radeon_vm_clear_freed(struct radeon_device *rdev,
2842 struct radeon_vm *vm); 2905 struct radeon_vm *vm);
2906int radeon_vm_clear_invalids(struct radeon_device *rdev,
2907 struct radeon_vm *vm);
2843int radeon_vm_bo_update(struct radeon_device *rdev, 2908int radeon_vm_bo_update(struct radeon_device *rdev,
2844 struct radeon_bo_va *bo_va, 2909 struct radeon_bo_va *bo_va,
2845 struct ttm_mem_reg *mem); 2910 struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 34b9aa9e3c06..eeeeabe09758 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -185,6 +185,7 @@ static struct radeon_asic_ring r100_gfx_ring = {
185 .get_rptr = &r100_gfx_get_rptr, 185 .get_rptr = &r100_gfx_get_rptr,
186 .get_wptr = &r100_gfx_get_wptr, 186 .get_wptr = &r100_gfx_get_wptr,
187 .set_wptr = &r100_gfx_set_wptr, 187 .set_wptr = &r100_gfx_set_wptr,
188 .hdp_flush = &r100_ring_hdp_flush,
188}; 189};
189 190
190static struct radeon_asic r100_asic = { 191static struct radeon_asic r100_asic = {
@@ -194,7 +195,7 @@ static struct radeon_asic r100_asic = {
194 .resume = &r100_resume, 195 .resume = &r100_resume,
195 .vga_set_state = &r100_vga_set_state, 196 .vga_set_state = &r100_vga_set_state,
196 .asic_reset = &r100_asic_reset, 197 .asic_reset = &r100_asic_reset,
197 .ioctl_wait_idle = NULL, 198 .mmio_hdp_flush = NULL,
198 .gui_idle = &r100_gui_idle, 199 .gui_idle = &r100_gui_idle,
199 .mc_wait_for_idle = &r100_mc_wait_for_idle, 200 .mc_wait_for_idle = &r100_mc_wait_for_idle,
200 .gart = { 201 .gart = {
@@ -260,7 +261,7 @@ static struct radeon_asic r200_asic = {
260 .resume = &r100_resume, 261 .resume = &r100_resume,
261 .vga_set_state = &r100_vga_set_state, 262 .vga_set_state = &r100_vga_set_state,
262 .asic_reset = &r100_asic_reset, 263 .asic_reset = &r100_asic_reset,
263 .ioctl_wait_idle = NULL, 264 .mmio_hdp_flush = NULL,
264 .gui_idle = &r100_gui_idle, 265 .gui_idle = &r100_gui_idle,
265 .mc_wait_for_idle = &r100_mc_wait_for_idle, 266 .mc_wait_for_idle = &r100_mc_wait_for_idle,
266 .gart = { 267 .gart = {
@@ -331,6 +332,7 @@ static struct radeon_asic_ring r300_gfx_ring = {
331 .get_rptr = &r100_gfx_get_rptr, 332 .get_rptr = &r100_gfx_get_rptr,
332 .get_wptr = &r100_gfx_get_wptr, 333 .get_wptr = &r100_gfx_get_wptr,
333 .set_wptr = &r100_gfx_set_wptr, 334 .set_wptr = &r100_gfx_set_wptr,
335 .hdp_flush = &r100_ring_hdp_flush,
334}; 336};
335 337
336static struct radeon_asic r300_asic = { 338static struct radeon_asic r300_asic = {
@@ -340,7 +342,7 @@ static struct radeon_asic r300_asic = {
340 .resume = &r300_resume, 342 .resume = &r300_resume,
341 .vga_set_state = &r100_vga_set_state, 343 .vga_set_state = &r100_vga_set_state,
342 .asic_reset = &r300_asic_reset, 344 .asic_reset = &r300_asic_reset,
343 .ioctl_wait_idle = NULL, 345 .mmio_hdp_flush = NULL,
344 .gui_idle = &r100_gui_idle, 346 .gui_idle = &r100_gui_idle,
345 .mc_wait_for_idle = &r300_mc_wait_for_idle, 347 .mc_wait_for_idle = &r300_mc_wait_for_idle,
346 .gart = { 348 .gart = {
@@ -406,7 +408,7 @@ static struct radeon_asic r300_asic_pcie = {
406 .resume = &r300_resume, 408 .resume = &r300_resume,
407 .vga_set_state = &r100_vga_set_state, 409 .vga_set_state = &r100_vga_set_state,
408 .asic_reset = &r300_asic_reset, 410 .asic_reset = &r300_asic_reset,
409 .ioctl_wait_idle = NULL, 411 .mmio_hdp_flush = NULL,
410 .gui_idle = &r100_gui_idle, 412 .gui_idle = &r100_gui_idle,
411 .mc_wait_for_idle = &r300_mc_wait_for_idle, 413 .mc_wait_for_idle = &r300_mc_wait_for_idle,
412 .gart = { 414 .gart = {
@@ -472,7 +474,7 @@ static struct radeon_asic r420_asic = {
472 .resume = &r420_resume, 474 .resume = &r420_resume,
473 .vga_set_state = &r100_vga_set_state, 475 .vga_set_state = &r100_vga_set_state,
474 .asic_reset = &r300_asic_reset, 476 .asic_reset = &r300_asic_reset,
475 .ioctl_wait_idle = NULL, 477 .mmio_hdp_flush = NULL,
476 .gui_idle = &r100_gui_idle, 478 .gui_idle = &r100_gui_idle,
477 .mc_wait_for_idle = &r300_mc_wait_for_idle, 479 .mc_wait_for_idle = &r300_mc_wait_for_idle,
478 .gart = { 480 .gart = {
@@ -538,7 +540,7 @@ static struct radeon_asic rs400_asic = {
538 .resume = &rs400_resume, 540 .resume = &rs400_resume,
539 .vga_set_state = &r100_vga_set_state, 541 .vga_set_state = &r100_vga_set_state,
540 .asic_reset = &r300_asic_reset, 542 .asic_reset = &r300_asic_reset,
541 .ioctl_wait_idle = NULL, 543 .mmio_hdp_flush = NULL,
542 .gui_idle = &r100_gui_idle, 544 .gui_idle = &r100_gui_idle,
543 .mc_wait_for_idle = &rs400_mc_wait_for_idle, 545 .mc_wait_for_idle = &rs400_mc_wait_for_idle,
544 .gart = { 546 .gart = {
@@ -604,7 +606,7 @@ static struct radeon_asic rs600_asic = {
604 .resume = &rs600_resume, 606 .resume = &rs600_resume,
605 .vga_set_state = &r100_vga_set_state, 607 .vga_set_state = &r100_vga_set_state,
606 .asic_reset = &rs600_asic_reset, 608 .asic_reset = &rs600_asic_reset,
607 .ioctl_wait_idle = NULL, 609 .mmio_hdp_flush = NULL,
608 .gui_idle = &r100_gui_idle, 610 .gui_idle = &r100_gui_idle,
609 .mc_wait_for_idle = &rs600_mc_wait_for_idle, 611 .mc_wait_for_idle = &rs600_mc_wait_for_idle,
610 .gart = { 612 .gart = {
@@ -672,7 +674,7 @@ static struct radeon_asic rs690_asic = {
672 .resume = &rs690_resume, 674 .resume = &rs690_resume,
673 .vga_set_state = &r100_vga_set_state, 675 .vga_set_state = &r100_vga_set_state,
674 .asic_reset = &rs600_asic_reset, 676 .asic_reset = &rs600_asic_reset,
675 .ioctl_wait_idle = NULL, 677 .mmio_hdp_flush = NULL,
676 .gui_idle = &r100_gui_idle, 678 .gui_idle = &r100_gui_idle,
677 .mc_wait_for_idle = &rs690_mc_wait_for_idle, 679 .mc_wait_for_idle = &rs690_mc_wait_for_idle,
678 .gart = { 680 .gart = {
@@ -740,7 +742,7 @@ static struct radeon_asic rv515_asic = {
740 .resume = &rv515_resume, 742 .resume = &rv515_resume,
741 .vga_set_state = &r100_vga_set_state, 743 .vga_set_state = &r100_vga_set_state,
742 .asic_reset = &rs600_asic_reset, 744 .asic_reset = &rs600_asic_reset,
743 .ioctl_wait_idle = NULL, 745 .mmio_hdp_flush = NULL,
744 .gui_idle = &r100_gui_idle, 746 .gui_idle = &r100_gui_idle,
745 .mc_wait_for_idle = &rv515_mc_wait_for_idle, 747 .mc_wait_for_idle = &rv515_mc_wait_for_idle,
746 .gart = { 748 .gart = {
@@ -806,7 +808,7 @@ static struct radeon_asic r520_asic = {
806 .resume = &r520_resume, 808 .resume = &r520_resume,
807 .vga_set_state = &r100_vga_set_state, 809 .vga_set_state = &r100_vga_set_state,
808 .asic_reset = &rs600_asic_reset, 810 .asic_reset = &rs600_asic_reset,
809 .ioctl_wait_idle = NULL, 811 .mmio_hdp_flush = NULL,
810 .gui_idle = &r100_gui_idle, 812 .gui_idle = &r100_gui_idle,
811 .mc_wait_for_idle = &r520_mc_wait_for_idle, 813 .mc_wait_for_idle = &r520_mc_wait_for_idle,
812 .gart = { 814 .gart = {
@@ -898,7 +900,7 @@ static struct radeon_asic r600_asic = {
898 .resume = &r600_resume, 900 .resume = &r600_resume,
899 .vga_set_state = &r600_vga_set_state, 901 .vga_set_state = &r600_vga_set_state,
900 .asic_reset = &r600_asic_reset, 902 .asic_reset = &r600_asic_reset,
901 .ioctl_wait_idle = r600_ioctl_wait_idle, 903 .mmio_hdp_flush = r600_mmio_hdp_flush,
902 .gui_idle = &r600_gui_idle, 904 .gui_idle = &r600_gui_idle,
903 .mc_wait_for_idle = &r600_mc_wait_for_idle, 905 .mc_wait_for_idle = &r600_mc_wait_for_idle,
904 .get_xclk = &r600_get_xclk, 906 .get_xclk = &r600_get_xclk,
@@ -970,7 +972,7 @@ static struct radeon_asic rv6xx_asic = {
970 .resume = &r600_resume, 972 .resume = &r600_resume,
971 .vga_set_state = &r600_vga_set_state, 973 .vga_set_state = &r600_vga_set_state,
972 .asic_reset = &r600_asic_reset, 974 .asic_reset = &r600_asic_reset,
973 .ioctl_wait_idle = r600_ioctl_wait_idle, 975 .mmio_hdp_flush = r600_mmio_hdp_flush,
974 .gui_idle = &r600_gui_idle, 976 .gui_idle = &r600_gui_idle,
975 .mc_wait_for_idle = &r600_mc_wait_for_idle, 977 .mc_wait_for_idle = &r600_mc_wait_for_idle,
976 .get_xclk = &r600_get_xclk, 978 .get_xclk = &r600_get_xclk,
@@ -1060,7 +1062,7 @@ static struct radeon_asic rs780_asic = {
1060 .resume = &r600_resume, 1062 .resume = &r600_resume,
1061 .vga_set_state = &r600_vga_set_state, 1063 .vga_set_state = &r600_vga_set_state,
1062 .asic_reset = &r600_asic_reset, 1064 .asic_reset = &r600_asic_reset,
1063 .ioctl_wait_idle = r600_ioctl_wait_idle, 1065 .mmio_hdp_flush = r600_mmio_hdp_flush,
1064 .gui_idle = &r600_gui_idle, 1066 .gui_idle = &r600_gui_idle,
1065 .mc_wait_for_idle = &r600_mc_wait_for_idle, 1067 .mc_wait_for_idle = &r600_mc_wait_for_idle,
1066 .get_xclk = &r600_get_xclk, 1068 .get_xclk = &r600_get_xclk,
@@ -1163,7 +1165,7 @@ static struct radeon_asic rv770_asic = {
1163 .resume = &rv770_resume, 1165 .resume = &rv770_resume,
1164 .asic_reset = &r600_asic_reset, 1166 .asic_reset = &r600_asic_reset,
1165 .vga_set_state = &r600_vga_set_state, 1167 .vga_set_state = &r600_vga_set_state,
1166 .ioctl_wait_idle = r600_ioctl_wait_idle, 1168 .mmio_hdp_flush = r600_mmio_hdp_flush,
1167 .gui_idle = &r600_gui_idle, 1169 .gui_idle = &r600_gui_idle,
1168 .mc_wait_for_idle = &r600_mc_wait_for_idle, 1170 .mc_wait_for_idle = &r600_mc_wait_for_idle,
1169 .get_xclk = &rv770_get_xclk, 1171 .get_xclk = &rv770_get_xclk,
@@ -1281,7 +1283,7 @@ static struct radeon_asic evergreen_asic = {
1281 .resume = &evergreen_resume, 1283 .resume = &evergreen_resume,
1282 .asic_reset = &evergreen_asic_reset, 1284 .asic_reset = &evergreen_asic_reset,
1283 .vga_set_state = &r600_vga_set_state, 1285 .vga_set_state = &r600_vga_set_state,
1284 .ioctl_wait_idle = r600_ioctl_wait_idle, 1286 .mmio_hdp_flush = r600_mmio_hdp_flush,
1285 .gui_idle = &r600_gui_idle, 1287 .gui_idle = &r600_gui_idle,
1286 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1288 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1287 .get_xclk = &rv770_get_xclk, 1289 .get_xclk = &rv770_get_xclk,
@@ -1373,7 +1375,7 @@ static struct radeon_asic sumo_asic = {
1373 .resume = &evergreen_resume, 1375 .resume = &evergreen_resume,
1374 .asic_reset = &evergreen_asic_reset, 1376 .asic_reset = &evergreen_asic_reset,
1375 .vga_set_state = &r600_vga_set_state, 1377 .vga_set_state = &r600_vga_set_state,
1376 .ioctl_wait_idle = r600_ioctl_wait_idle, 1378 .mmio_hdp_flush = r600_mmio_hdp_flush,
1377 .gui_idle = &r600_gui_idle, 1379 .gui_idle = &r600_gui_idle,
1378 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1380 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1379 .get_xclk = &r600_get_xclk, 1381 .get_xclk = &r600_get_xclk,
@@ -1464,7 +1466,7 @@ static struct radeon_asic btc_asic = {
1464 .resume = &evergreen_resume, 1466 .resume = &evergreen_resume,
1465 .asic_reset = &evergreen_asic_reset, 1467 .asic_reset = &evergreen_asic_reset,
1466 .vga_set_state = &r600_vga_set_state, 1468 .vga_set_state = &r600_vga_set_state,
1467 .ioctl_wait_idle = r600_ioctl_wait_idle, 1469 .mmio_hdp_flush = r600_mmio_hdp_flush,
1468 .gui_idle = &r600_gui_idle, 1470 .gui_idle = &r600_gui_idle,
1469 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1471 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1470 .get_xclk = &rv770_get_xclk, 1472 .get_xclk = &rv770_get_xclk,
@@ -1599,7 +1601,7 @@ static struct radeon_asic cayman_asic = {
1599 .resume = &cayman_resume, 1601 .resume = &cayman_resume,
1600 .asic_reset = &cayman_asic_reset, 1602 .asic_reset = &cayman_asic_reset,
1601 .vga_set_state = &r600_vga_set_state, 1603 .vga_set_state = &r600_vga_set_state,
1602 .ioctl_wait_idle = r600_ioctl_wait_idle, 1604 .mmio_hdp_flush = r600_mmio_hdp_flush,
1603 .gui_idle = &r600_gui_idle, 1605 .gui_idle = &r600_gui_idle,
1604 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1606 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1605 .get_xclk = &rv770_get_xclk, 1607 .get_xclk = &rv770_get_xclk,
@@ -1611,7 +1613,10 @@ static struct radeon_asic cayman_asic = {
1611 .vm = { 1613 .vm = {
1612 .init = &cayman_vm_init, 1614 .init = &cayman_vm_init,
1613 .fini = &cayman_vm_fini, 1615 .fini = &cayman_vm_fini,
1614 .set_page = &cayman_dma_vm_set_page, 1616 .copy_pages = &cayman_dma_vm_copy_pages,
1617 .write_pages = &cayman_dma_vm_write_pages,
1618 .set_pages = &cayman_dma_vm_set_pages,
1619 .pad_ib = &cayman_dma_vm_pad_ib,
1615 }, 1620 },
1616 .ring = { 1621 .ring = {
1617 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, 1622 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1699,7 +1704,7 @@ static struct radeon_asic trinity_asic = {
1699 .resume = &cayman_resume, 1704 .resume = &cayman_resume,
1700 .asic_reset = &cayman_asic_reset, 1705 .asic_reset = &cayman_asic_reset,
1701 .vga_set_state = &r600_vga_set_state, 1706 .vga_set_state = &r600_vga_set_state,
1702 .ioctl_wait_idle = r600_ioctl_wait_idle, 1707 .mmio_hdp_flush = r600_mmio_hdp_flush,
1703 .gui_idle = &r600_gui_idle, 1708 .gui_idle = &r600_gui_idle,
1704 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1709 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1705 .get_xclk = &r600_get_xclk, 1710 .get_xclk = &r600_get_xclk,
@@ -1711,7 +1716,10 @@ static struct radeon_asic trinity_asic = {
1711 .vm = { 1716 .vm = {
1712 .init = &cayman_vm_init, 1717 .init = &cayman_vm_init,
1713 .fini = &cayman_vm_fini, 1718 .fini = &cayman_vm_fini,
1714 .set_page = &cayman_dma_vm_set_page, 1719 .copy_pages = &cayman_dma_vm_copy_pages,
1720 .write_pages = &cayman_dma_vm_write_pages,
1721 .set_pages = &cayman_dma_vm_set_pages,
1722 .pad_ib = &cayman_dma_vm_pad_ib,
1715 }, 1723 },
1716 .ring = { 1724 .ring = {
1717 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, 1725 [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1829,7 +1837,7 @@ static struct radeon_asic si_asic = {
1829 .resume = &si_resume, 1837 .resume = &si_resume,
1830 .asic_reset = &si_asic_reset, 1838 .asic_reset = &si_asic_reset,
1831 .vga_set_state = &r600_vga_set_state, 1839 .vga_set_state = &r600_vga_set_state,
1832 .ioctl_wait_idle = r600_ioctl_wait_idle, 1840 .mmio_hdp_flush = r600_mmio_hdp_flush,
1833 .gui_idle = &r600_gui_idle, 1841 .gui_idle = &r600_gui_idle,
1834 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1842 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1835 .get_xclk = &si_get_xclk, 1843 .get_xclk = &si_get_xclk,
@@ -1841,7 +1849,10 @@ static struct radeon_asic si_asic = {
1841 .vm = { 1849 .vm = {
1842 .init = &si_vm_init, 1850 .init = &si_vm_init,
1843 .fini = &si_vm_fini, 1851 .fini = &si_vm_fini,
1844 .set_page = &si_dma_vm_set_page, 1852 .copy_pages = &si_dma_vm_copy_pages,
1853 .write_pages = &si_dma_vm_write_pages,
1854 .set_pages = &si_dma_vm_set_pages,
1855 .pad_ib = &cayman_dma_vm_pad_ib,
1845 }, 1856 },
1846 .ring = { 1857 .ring = {
1847 [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring, 1858 [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1987,7 +1998,7 @@ static struct radeon_asic ci_asic = {
1987 .resume = &cik_resume, 1998 .resume = &cik_resume,
1988 .asic_reset = &cik_asic_reset, 1999 .asic_reset = &cik_asic_reset,
1989 .vga_set_state = &r600_vga_set_state, 2000 .vga_set_state = &r600_vga_set_state,
1990 .ioctl_wait_idle = NULL, 2001 .mmio_hdp_flush = &r600_mmio_hdp_flush,
1991 .gui_idle = &r600_gui_idle, 2002 .gui_idle = &r600_gui_idle,
1992 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 2003 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1993 .get_xclk = &cik_get_xclk, 2004 .get_xclk = &cik_get_xclk,
@@ -1999,7 +2010,10 @@ static struct radeon_asic ci_asic = {
1999 .vm = { 2010 .vm = {
2000 .init = &cik_vm_init, 2011 .init = &cik_vm_init,
2001 .fini = &cik_vm_fini, 2012 .fini = &cik_vm_fini,
2002 .set_page = &cik_sdma_vm_set_page, 2013 .copy_pages = &cik_sdma_vm_copy_pages,
2014 .write_pages = &cik_sdma_vm_write_pages,
2015 .set_pages = &cik_sdma_vm_set_pages,
2016 .pad_ib = &cik_sdma_vm_pad_ib,
2003 }, 2017 },
2004 .ring = { 2018 .ring = {
2005 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, 2019 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2091,7 +2105,7 @@ static struct radeon_asic kv_asic = {
2091 .resume = &cik_resume, 2105 .resume = &cik_resume,
2092 .asic_reset = &cik_asic_reset, 2106 .asic_reset = &cik_asic_reset,
2093 .vga_set_state = &r600_vga_set_state, 2107 .vga_set_state = &r600_vga_set_state,
2094 .ioctl_wait_idle = NULL, 2108 .mmio_hdp_flush = &r600_mmio_hdp_flush,
2095 .gui_idle = &r600_gui_idle, 2109 .gui_idle = &r600_gui_idle,
2096 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 2110 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
2097 .get_xclk = &cik_get_xclk, 2111 .get_xclk = &cik_get_xclk,
@@ -2103,7 +2117,10 @@ static struct radeon_asic kv_asic = {
2103 .vm = { 2117 .vm = {
2104 .init = &cik_vm_init, 2118 .init = &cik_vm_init,
2105 .fini = &cik_vm_fini, 2119 .fini = &cik_vm_fini,
2106 .set_page = &cik_sdma_vm_set_page, 2120 .copy_pages = &cik_sdma_vm_copy_pages,
2121 .write_pages = &cik_sdma_vm_write_pages,
2122 .set_pages = &cik_sdma_vm_set_pages,
2123 .pad_ib = &cik_sdma_vm_pad_ib,
2107 }, 2124 },
2108 .ring = { 2125 .ring = {
2109 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, 2126 [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2457,7 +2474,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2457 rdev->cg_flags = 2474 rdev->cg_flags =
2458 RADEON_CG_SUPPORT_GFX_MGCG | 2475 RADEON_CG_SUPPORT_GFX_MGCG |
2459 RADEON_CG_SUPPORT_GFX_MGLS | 2476 RADEON_CG_SUPPORT_GFX_MGLS |
2460 RADEON_CG_SUPPORT_GFX_CGCG | 2477 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2461 RADEON_CG_SUPPORT_GFX_CGLS | 2478 RADEON_CG_SUPPORT_GFX_CGLS |
2462 RADEON_CG_SUPPORT_GFX_CGTS | 2479 RADEON_CG_SUPPORT_GFX_CGTS |
2463 RADEON_CG_SUPPORT_GFX_CGTS_LS | 2480 RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2476,7 +2493,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2476 rdev->cg_flags = 2493 rdev->cg_flags =
2477 RADEON_CG_SUPPORT_GFX_MGCG | 2494 RADEON_CG_SUPPORT_GFX_MGCG |
2478 RADEON_CG_SUPPORT_GFX_MGLS | 2495 RADEON_CG_SUPPORT_GFX_MGLS |
2479 RADEON_CG_SUPPORT_GFX_CGCG | 2496 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2480 RADEON_CG_SUPPORT_GFX_CGLS | 2497 RADEON_CG_SUPPORT_GFX_CGLS |
2481 RADEON_CG_SUPPORT_GFX_CGTS | 2498 RADEON_CG_SUPPORT_GFX_CGTS |
2482 RADEON_CG_SUPPORT_GFX_CP_LS | 2499 RADEON_CG_SUPPORT_GFX_CP_LS |
@@ -2502,7 +2519,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2502 rdev->cg_flags = 2519 rdev->cg_flags =
2503 RADEON_CG_SUPPORT_GFX_MGCG | 2520 RADEON_CG_SUPPORT_GFX_MGCG |
2504 RADEON_CG_SUPPORT_GFX_MGLS | 2521 RADEON_CG_SUPPORT_GFX_MGLS |
2505 RADEON_CG_SUPPORT_GFX_CGCG | 2522 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2506 RADEON_CG_SUPPORT_GFX_CGLS | 2523 RADEON_CG_SUPPORT_GFX_CGLS |
2507 RADEON_CG_SUPPORT_GFX_CGTS | 2524 RADEON_CG_SUPPORT_GFX_CGTS |
2508 RADEON_CG_SUPPORT_GFX_CGTS_LS | 2525 RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2530,7 +2547,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2530 rdev->cg_flags = 2547 rdev->cg_flags =
2531 RADEON_CG_SUPPORT_GFX_MGCG | 2548 RADEON_CG_SUPPORT_GFX_MGCG |
2532 RADEON_CG_SUPPORT_GFX_MGLS | 2549 RADEON_CG_SUPPORT_GFX_MGLS |
2533 RADEON_CG_SUPPORT_GFX_CGCG | 2550 /*RADEON_CG_SUPPORT_GFX_CGCG |*/
2534 RADEON_CG_SUPPORT_GFX_CGLS | 2551 RADEON_CG_SUPPORT_GFX_CGLS |
2535 RADEON_CG_SUPPORT_GFX_CGTS | 2552 RADEON_CG_SUPPORT_GFX_CGTS |
2536 RADEON_CG_SUPPORT_GFX_CGTS_LS | 2553 RADEON_CG_SUPPORT_GFX_CGTS_LS |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 01e7c0ad8f01..275a5dc01780 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -68,7 +68,7 @@ int r100_asic_reset(struct radeon_device *rdev);
68u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 68u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
69void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 69void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
70void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, 70void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
71 uint64_t addr); 71 uint64_t addr, uint32_t flags);
72void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); 72void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
73int r100_irq_set(struct radeon_device *rdev); 73int r100_irq_set(struct radeon_device *rdev);
74int r100_irq_process(struct radeon_device *rdev); 74int r100_irq_process(struct radeon_device *rdev);
@@ -148,7 +148,8 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
148 struct radeon_ring *ring); 148 struct radeon_ring *ring);
149void r100_gfx_set_wptr(struct radeon_device *rdev, 149void r100_gfx_set_wptr(struct radeon_device *rdev,
150 struct radeon_ring *ring); 150 struct radeon_ring *ring);
151 151void r100_ring_hdp_flush(struct radeon_device *rdev,
152 struct radeon_ring *ring);
152/* 153/*
153 * r200,rv250,rs300,rv280 154 * r200,rv250,rs300,rv280
154 */ 155 */
@@ -173,7 +174,7 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
173extern int r300_cs_parse(struct radeon_cs_parser *p); 174extern int r300_cs_parse(struct radeon_cs_parser *p);
174extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); 175extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
175extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, 176extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
176 uint64_t addr); 177 uint64_t addr, uint32_t flags);
177extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 178extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
178extern int rv370_get_pcie_lanes(struct radeon_device *rdev); 179extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
179extern void r300_set_reg_safe(struct radeon_device *rdev); 180extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -209,7 +210,7 @@ extern int rs400_suspend(struct radeon_device *rdev);
209extern int rs400_resume(struct radeon_device *rdev); 210extern int rs400_resume(struct radeon_device *rdev);
210void rs400_gart_tlb_flush(struct radeon_device *rdev); 211void rs400_gart_tlb_flush(struct radeon_device *rdev);
211void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, 212void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
212 uint64_t addr); 213 uint64_t addr, uint32_t flags);
213uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 214uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
214void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 215void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
215int rs400_gart_init(struct radeon_device *rdev); 216int rs400_gart_init(struct radeon_device *rdev);
@@ -233,7 +234,7 @@ void rs600_irq_disable(struct radeon_device *rdev);
233u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); 234u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
234void rs600_gart_tlb_flush(struct radeon_device *rdev); 235void rs600_gart_tlb_flush(struct radeon_device *rdev);
235void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, 236void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
236 uint64_t addr); 237 uint64_t addr, uint32_t flags);
237uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 238uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
238void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 239void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
239void rs600_bandwidth_update(struct radeon_device *rdev); 240void rs600_bandwidth_update(struct radeon_device *rdev);
@@ -351,7 +352,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
351bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 352bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
352void r600_hpd_set_polarity(struct radeon_device *rdev, 353void r600_hpd_set_polarity(struct radeon_device *rdev,
353 enum radeon_hpd_id hpd); 354 enum radeon_hpd_id hpd);
354extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); 355extern void r600_mmio_hdp_flush(struct radeon_device *rdev);
355extern bool r600_gui_idle(struct radeon_device *rdev); 356extern bool r600_gui_idle(struct radeon_device *rdev);
356extern void r600_pm_misc(struct radeon_device *rdev); 357extern void r600_pm_misc(struct radeon_device *rdev);
357extern void r600_pm_init_profile(struct radeon_device *rdev); 358extern void r600_pm_init_profile(struct radeon_device *rdev);
@@ -606,11 +607,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
 				struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-			    struct radeon_ib *ib,
-			    uint64_t pe,
-			    uint64_t addr, unsigned count,
-			    uint32_t incr, uint32_t flags);
+
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+			      struct radeon_ib *ib,
+			      uint64_t pe, uint64_t src,
+			      unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+			       struct radeon_ib *ib,
+			       uint64_t pe,
+			       uint64_t addr, unsigned count,
+			       uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
 
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
@@ -693,11 +705,22 @@ int si_copy_dma(struct radeon_device *rdev,
 		uint64_t src_offset, uint64_t dst_offset,
 		unsigned num_gpu_pages,
 		struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags);
+
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe, uint64_t src,
+			  unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+			 struct radeon_ib *ib,
+			 uint64_t pe,
+			 uint64_t addr, unsigned count,
+			 uint32_t incr, uint32_t flags);
+
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -771,11 +794,23 @@ int cik_irq_process(struct radeon_device *rdev);
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-			  struct radeon_ib *ib,
-			  uint64_t pe,
-			  uint64_t addr, unsigned count,
-			  uint32_t incr, uint32_t flags);
+
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe, uint64_t src,
+			    unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_gfx_get_rptr(struct radeon_device *rdev,
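[ Ed. The radeon_asic.h hunks above replace the single per-ASIC vm_set_page()
  hook with three narrower hooks (copy_pages / write_pages / set_pages) plus an
  IB padding helper, matching the GPUVM improvements noted in the pull text.
  As a rough illustration of how a caller might pick between them -- a sketch
  only, the real dispatch lives in the radeon VM code and differs in detail,
  and the R600_PTE_SYSTEM test here is an assumption: ]

	/* Hypothetical dispatcher for the new page-table update hooks. */
	static void example_vm_update(struct radeon_device *rdev,
				      struct radeon_ib *ib, uint64_t pe,
				      uint64_t src, uint64_t addr,
				      unsigned count, uint32_t incr,
				      uint32_t flags)
	{
		if (src)
			/* PTEs already laid out in GART: DMA-copy them */
			cik_sdma_vm_copy_pages(rdev, ib, pe, src, count);
		else if (flags & R600_PTE_SYSTEM)
			/* scattered system pages: write each PTE into the IB */
			cik_sdma_vm_write_pages(rdev, ib, pe, addr, count,
						incr, flags);
		else
			/* linear range: use the set-pages fast path */
			cik_sdma_vm_set_pages(rdev, ib, pe, addr, count,
					      incr, flags);

		cik_sdma_vm_pad_ib(ib);	/* keep the IB size aligned */
	}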
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 173f378428a9..92b2d8dd4735 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1963,7 +1963,7 @@ static const char *thermal_controller_names[] = {
1963 "adm1032", 1963 "adm1032",
1964 "adm1030", 1964 "adm1030",
1965 "max6649", 1965 "max6649",
1966 "lm64", 1966 "lm63", /* lm64 */
1967 "f75375", 1967 "f75375",
1968 "asc7xxx", 1968 "asc7xxx",
1969}; 1969};
@@ -1974,7 +1974,7 @@ static const char *pp_lib_thermal_controller_names[] = {
1974 "adm1032", 1974 "adm1032",
1975 "adm1030", 1975 "adm1030",
1976 "max6649", 1976 "max6649",
1977 "lm64", 1977 "lm63", /* lm64 */
1978 "f75375", 1978 "f75375",
1979 "RV6xx", 1979 "RV6xx",
1980 "RV770", 1980 "RV770",
@@ -3236,6 +3236,41 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
 	return 0;
 }
 
+union get_voltage_info {
+	struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
+	struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
+};
+
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+				u16 virtual_voltage_id,
+				u16 *voltage)
+{
+	int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
+	u32 entry_id;
+	u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
+	union get_voltage_info args;
+
+	for (entry_id = 0; entry_id < count; entry_id++) {
+		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
+		    virtual_voltage_id)
+			break;
+	}
+
+	if (entry_id >= count)
+		return -EINVAL;
+
+	args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
+	args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+	args.in.ulSCLKFreq =
+		cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
+
+	return 0;
+}
+
 int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
 					  u16 voltage_level, u8 voltage_type,
 					  u32 *gpio_value, u32 *gpio_mask)
@@ -3397,6 +3432,50 @@ radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
 	return false;
 }
 
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+			      u8 voltage_type,
+			      u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 3:
+			switch (crev) {
+			case 1:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v3(&voltage_info->v3,
+								      voltage_type,
+								      VOLTAGE_OBJ_SVID2);
+				if (voltage_object) {
+					*svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+					*svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+				} else {
+					return -EINVAL;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+
+	}
+	return 0;
+}
+
 int radeon_atom_get_max_voltage(struct radeon_device *rdev,
 				u8 voltage_type, u16 *max_voltage)
 {
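[ Ed. radeon_atom_get_voltage_evv() above resolves a virtual voltage id to a
  measured EVV voltage by walking the vddc-vs-sclk dependency table and
  executing the AtomBIOS GetVoltageInfo command table; radeon_atom_get_svi2_info()
  similarly pulls the SVI2 SVD/SVC gpio ids out of the VoltageObjectInfo data
  table. A hedged usage sketch -- the per-ASIC dpm code that actually calls
  these differs: ]

	u16 voltage;
	u8 svd_gpio_id, svc_gpio_id;

	/* virtual_voltage_id typically comes from the leakage table */
	if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &voltage) == 0)
		DRM_DEBUG("EVV voltage for vid 0x%x: %u\n",
			  virtual_voltage_id, voltage);

	if (radeon_atom_get_svi2_info(rdev, VOLTAGE_TYPE_VDDC,
				      &svd_gpio_id, &svc_gpio_id) == 0)
		DRM_DEBUG("SVI2 gpios: svd %u svc %u\n",
			  svd_gpio_id, svc_gpio_id);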
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 6e05a2e75a46..69f5695bdab9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -97,7 +97,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 	int time;
 
 	n = RADEON_BENCHMARK_ITERATIONS;
-	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -109,7 +109,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
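[ Ed. Both hunks above are fallout from radeon_bo_create() growing a
  creation-flags argument this cycle (used elsewhere in the series for the
  uncached and write-combined GTT buffers mentioned in the pull text); existing
  callers just pass 0. Illustrative sketch of a caller opting in to a flag --
  RADEON_GEM_GTT_WC is assumed here from the same series: ]

	struct radeon_bo *bo;
	int r;

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_GTT, RADEON_GEM_GTT_WC,
			     NULL, &bo);
	if (r)
		return r;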
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 44831197e82e..300c4b3d4669 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -107,7 +107,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_HDMIB:
 		if (radeon_connector->use_digital) {
-			if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
 				if (connector->display_info.bpc)
 					bpc = connector->display_info.bpc;
 			}
@@ -115,7 +115,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
 		break;
 	case DRM_MODE_CONNECTOR_DVID:
 	case DRM_MODE_CONNECTOR_HDMIA:
-		if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+		if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
 			if (connector->display_info.bpc)
 				bpc = connector->display_info.bpc;
 		}
@@ -124,7 +124,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
 		dig_connector = radeon_connector->con_priv;
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
-		    drm_detect_hdmi_monitor(radeon_connector->edid)) {
+		    drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
 			if (connector->display_info.bpc)
 				bpc = connector->display_info.bpc;
 		}
@@ -148,7 +148,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
 		break;
 	}
 
-	if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+	if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
 		/* hdmi deep color only implemented on DCE4+ */
 		if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
 			DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
@@ -197,10 +197,19 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
 					  connector->name, bpc);
 			}
 		}
+		else if (bpc > 8) {
+			/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+			DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+				  connector->name);
+			bpc = 8;
+		}
 	}
 
-	if ((radeon_deep_color == 0) && (bpc > 8))
+	if ((radeon_deep_color == 0) && (bpc > 8)) {
+		DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
+			  connector->name);
 		bpc = 8;
+	}
 
 	DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
 		  connector->name, connector->display_info.bpc, bpc);
@@ -216,7 +225,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
 	struct drm_encoder *best_encoder = NULL;
 	struct drm_encoder *encoder = NULL;
 	struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
-	struct drm_mode_object *obj;
 	bool connected;
 	int i;
 
@@ -226,14 +234,11 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev,
-					   connector->encoder_ids[i],
-					   DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev,
+					   connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
-
 		if ((encoder == best_encoder) && (status == connector_status_connected))
 			connected = true;
 		else
@@ -249,7 +254,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
 
 static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
 {
-	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
 	int i;
 
@@ -257,34 +261,134 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
 		if (encoder->encoder_type == encoder_type)
 			return encoder;
 	}
 	return NULL;
 }
 
+struct edid *radeon_connector_edid(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
+
+	if (radeon_connector->edid) {
+		return radeon_connector->edid;
+	} else if (edid_blob) {
+		struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+		if (edid)
+			radeon_connector->edid = edid;
+	}
+	return radeon_connector->edid;
+}
+
+static void radeon_connector_get_edid(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->edid)
+		return;
+
+	/* on hw with routers, select right port */
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
+
+	if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+	     ENCODER_OBJECT_ID_NONE) &&
+	    radeon_connector->ddc_bus->has_aux) {
+		radeon_connector->edid = drm_get_edid(connector,
+						      &radeon_connector->ddc_bus->aux.ddc);
+	} else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+		   (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
+		    radeon_connector->ddc_bus->has_aux)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &radeon_connector->ddc_bus->aux.ddc);
+		else if (radeon_connector->ddc_bus)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &radeon_connector->ddc_bus->adapter);
+	} else if (radeon_connector->ddc_bus) {
+		radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+						      &radeon_connector->ddc_bus->adapter);
+	}
+
+	if (!radeon_connector->edid) {
+		if (rdev->is_atom_bios) {
+			/* some laptops provide a hardcoded edid in rom for LCDs */
+			if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+			     (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+		} else {
+			/* some servers provide a hardcoded edid in rom for KVMs */
+			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+		}
+	}
+}
+
+static void radeon_connector_free_edid(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->edid) {
+		kfree(radeon_connector->edid);
+		radeon_connector->edid = NULL;
+	}
+}
+
+static int radeon_ddc_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int ret;
+
+	if (radeon_connector->edid) {
+		drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
+		ret = drm_add_edid_modes(connector, radeon_connector->edid);
+		drm_edid_to_eld(connector, radeon_connector->edid);
+		return ret;
+	}
+	drm_mode_connector_update_edid_property(connector, NULL);
+	return 0;
+}
+
 static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-
 	/* pick the encoder ids */
-	if (enc_id) {
-		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
+static void radeon_get_native_mode(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	struct radeon_encoder *radeon_encoder;
+
+	if (encoder == NULL)
+		return;
+
+	radeon_encoder = to_radeon_encoder(encoder);
+
+	if (!list_empty(&connector->probed_modes)) {
+		struct drm_display_mode *preferred_mode =
+			list_first_entry(&connector->probed_modes,
+					 struct drm_display_mode, head);
+
+		radeon_encoder->native_mode = *preferred_mode;
+	} else {
+		radeon_encoder->native_mode.clock = 0;
+	}
+}
+
 /*
  * radeon_connector_analog_encoder_conflict_solve
  * - search for other connectors sharing this encoder
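[ Ed. The new helpers above centralize EDID handling in radeon_connectors.c:
  detect() paths refresh the cache with radeon_connector_free_edid() +
  radeon_connector_get_edid(), get_modes() paths consume the cached copy via
  radeon_ddc_get_modes(), and radeon_connector_edid() re-exposes it (falling
  back to the connector's EDID property blob). A condensed sketch of the
  resulting lifecycle, illustrative rather than a literal excerpt: ]

	/* in detect(): refresh the cached EDID */
	radeon_connector_free_edid(connector);	/* drop any stale copy */
	radeon_connector_get_edid(connector);	/* aux/DDC/hardcoded fetch */
	if (radeon_connector->edid)
		status = connector_status_connected;

	/* in get_modes(): reuse the cache, no second DDC transaction */
	radeon_connector_get_edid(connector);
	ret = radeon_ddc_get_modes(connector);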
@@ -585,6 +689,35 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
 		radeon_property_change_mode(&radeon_encoder->base);
 	}
 
+	if (property == dev->mode_config.scaling_mode_property) {
+		enum radeon_rmx_type rmx_type;
+
+		if (connector->encoder)
+			radeon_encoder = to_radeon_encoder(connector->encoder);
+		else {
+			struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+			radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+		}
+
+		switch (val) {
+		default:
+		case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+		case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+		case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+		case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+		}
+		if (radeon_encoder->rmx_type == rmx_type)
+			return 0;
+
+		if ((rmx_type != DRM_MODE_SCALE_NONE) &&
+		    (radeon_encoder->native_mode.clock == 0))
+			return 0;
+
+		radeon_encoder->rmx_type = rmx_type;
+
+		radeon_property_change_mode(&radeon_encoder->base);
+	}
+
 	return 0;
 }
 
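[ Ed. With the scaling_mode_property handler above wired into
  radeon_connector_set_property(), userspace can select RMX scaling through the
  standard DRM connector property. A hedged libdrm sketch; scaling_prop_id is
  assumed to have been found by iterating drmModeObjectGetProperties() for the
  property named "scaling mode": ]

	#include <xf86drmMode.h>

	drmModeObjectSetProperty(fd, connector_id,
				 DRM_MODE_OBJECT_CONNECTOR,
				 scaling_prop_id,
				 DRM_MODE_SCALE_ASPECT);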
@@ -625,22 +758,20 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
 
 static int radeon_lvds_get_modes(struct drm_connector *connector)
 {
-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder;
 	int ret = 0;
 	struct drm_display_mode *mode;
 
-	if (radeon_connector->ddc_bus) {
-		ret = radeon_ddc_get_modes(radeon_connector);
-		if (ret > 0) {
-			encoder = radeon_best_single_encoder(connector);
-			if (encoder) {
-				radeon_fixup_lvds_native_mode(encoder, connector);
-				/* add scaled modes */
-				radeon_add_common_modes(encoder, connector);
-			}
-			return ret;
+	radeon_connector_get_edid(connector);
+	ret = radeon_ddc_get_modes(connector);
+	if (ret > 0) {
+		encoder = radeon_best_single_encoder(connector);
+		if (encoder) {
+			radeon_fixup_lvds_native_mode(encoder, connector);
+			/* add scaled modes */
+			radeon_add_common_modes(encoder, connector);
 		}
+		return ret;
 	}
 
 	encoder = radeon_best_single_encoder(connector);
@@ -715,16 +846,9 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
 	}
 
 	/* check for edid as well */
+	radeon_connector_get_edid(connector);
 	if (radeon_connector->edid)
 		ret = connector_status_connected;
-	else {
-		if (radeon_connector->ddc_bus) {
-			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
-							      &radeon_connector->ddc_bus->adapter);
-			if (radeon_connector->edid)
-				ret = connector_status_connected;
-		}
-	}
 	/* check acpi lid status ??? */
 
 	radeon_connector_update_scratch_regs(connector, ret);
@@ -737,10 +861,9 @@ static void radeon_connector_destroy(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
-	if (radeon_connector->edid)
-		kfree(radeon_connector->edid);
+	radeon_connector_free_edid(connector);
 	kfree(radeon_connector->con_priv);
-	drm_sysfs_connector_remove(connector);
+	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -797,10 +920,12 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
 
 static int radeon_vga_get_modes(struct drm_connector *connector)
 {
-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	int ret;
 
-	ret = radeon_ddc_get_modes(radeon_connector);
+	radeon_connector_get_edid(connector);
+	ret = radeon_ddc_get_modes(connector);
+
+	radeon_get_native_mode(connector);
 
 	return ret;
 }
@@ -843,28 +968,26 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
 	dret = radeon_ddc_probe(radeon_connector, false);
 	if (dret) {
 		radeon_connector->detected_by_load = false;
-		if (radeon_connector->edid) {
-			kfree(radeon_connector->edid);
-			radeon_connector->edid = NULL;
-		}
-		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+		radeon_connector_free_edid(connector);
+		radeon_connector_get_edid(connector);
 
 		if (!radeon_connector->edid) {
 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
 				  connector->name);
 			ret = connector_status_connected;
 		} else {
-			radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+			radeon_connector->use_digital =
+				!!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
 
 			/* some oems have boards with separate digital and analog connectors
 			 * with a shared ddc line (often vga + hdmi)
 			 */
 			if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
-				kfree(radeon_connector->edid);
-				radeon_connector->edid = NULL;
+				radeon_connector_free_edid(connector);
 				ret = connector_status_disconnected;
-			} else
+			} else {
 				ret = connector_status_connected;
+			}
 		}
 	} else {
 
@@ -999,15 +1122,6 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
 	.set_property = radeon_connector_set_property,
 };
 
-static int radeon_dvi_get_modes(struct drm_connector *connector)
-{
-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-	int ret;
-
-	ret = radeon_ddc_get_modes(radeon_connector);
-	return ret;
-}
-
 static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
@@ -1048,7 +1162,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder = NULL;
 	struct drm_encoder_helper_funcs *encoder_funcs;
-	struct drm_mode_object *obj;
 	int i, r;
 	enum drm_connector_status ret = connector_status_disconnected;
 	bool dret = false, broken_edid = false;
@@ -1066,18 +1179,16 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 	dret = radeon_ddc_probe(radeon_connector, false);
 	if (dret) {
 		radeon_connector->detected_by_load = false;
-		if (radeon_connector->edid) {
-			kfree(radeon_connector->edid);
-			radeon_connector->edid = NULL;
-		}
-		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+		radeon_connector_free_edid(connector);
+		radeon_connector_get_edid(connector);
 
 		if (!radeon_connector->edid) {
 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
 				  connector->name);
 			/* rs690 seems to have a problem with connectors not existing and always
 			 * return a block of 0's. If we see this just stop polling on this output */
-			if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+			if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) &&
+			    radeon_connector->base.null_edid_counter) {
 				ret = connector_status_disconnected;
 				DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
 					  connector->name);
@@ -1087,18 +1198,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 				broken_edid = true; /* defer use_digital to later */
 			}
 		} else {
-			radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+			radeon_connector->use_digital =
+				!!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
 
 			/* some oems have boards with separate digital and analog connectors
 			 * with a shared ddc line (often vga + hdmi)
 			 */
 			if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
-				kfree(radeon_connector->edid);
-				radeon_connector->edid = NULL;
+				radeon_connector_free_edid(connector);
 				ret = connector_status_disconnected;
-			} else
+			} else {
 				ret = connector_status_connected;
-
+			}
 			/* This gets complicated. We have boards with VGA + HDMI with a
 			 * shared DDC line and we have boards with DVI-D + HDMI with a shared
 			 * DDC line. The latter is more complex because with DVI<->HDMI adapters
@@ -1118,8 +1229,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 				if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
 					/* hpd is our only option in this case */
 					if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
-						kfree(radeon_connector->edid);
-						radeon_connector->edid = NULL;
+						radeon_connector_free_edid(connector);
 						ret = connector_status_disconnected;
 					}
 				}
@@ -1153,14 +1263,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev,
-					   connector->encoder_ids[i],
-					   DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev,
+					   connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
-
 		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
 		    encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
 			continue;
@@ -1225,19 +1332,16 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
 	int i;
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
-
 		if (radeon_connector->use_digital == true) {
 			if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
 				return encoder;
@@ -1252,13 +1356,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
 
 	/* then check use digital */
 	/* pick the first one */
-	if (enc_id) {
-		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			return NULL;
-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
 	return NULL;
 }
 
@@ -1291,7 +1390,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
 	    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
 	    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
 		return MODE_OK;
-	else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+	else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
 		/* HDMI 1.3+ supports max clock of 340 Mhz */
 		if (mode->clock > 340000)
 			return MODE_CLOCK_HIGH;
@@ -1310,7 +1409,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
 }
 
 static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
-	.get_modes = radeon_dvi_get_modes,
+	.get_modes = radeon_vga_get_modes,
 	.mode_valid = radeon_dvi_mode_valid,
 	.best_encoder = radeon_dvi_encoder,
 };
@@ -1339,7 +1438,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
 			if (!radeon_dig_connector->edp_on)
 				atombios_set_edp_panel_power(connector,
 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
-			ret = radeon_ddc_get_modes(radeon_connector);
+			radeon_connector_get_edid(connector);
+			ret = radeon_ddc_get_modes(connector);
 			if (!radeon_dig_connector->edp_on)
 				atombios_set_edp_panel_power(connector,
 							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
@@ -1350,7 +1450,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
 			if (encoder)
 				radeon_atom_ext_encoder_setup_ddc(encoder);
 		}
-		ret = radeon_ddc_get_modes(radeon_connector);
+		radeon_connector_get_edid(connector);
+		ret = radeon_ddc_get_modes(connector);
 	}
 
 	if (ret > 0) {
@@ -1383,7 +1484,10 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
 			if (encoder)
 				radeon_atom_ext_encoder_setup_ddc(encoder);
 		}
-		ret = radeon_ddc_get_modes(radeon_connector);
+		radeon_connector_get_edid(connector);
+		ret = radeon_ddc_get_modes(connector);
+
+		radeon_get_native_mode(connector);
 	}
 
 	return ret;
@@ -1391,7 +1495,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
 
 u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
 {
-	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
 	struct radeon_encoder *radeon_encoder;
 	int i;
@@ -1400,11 +1503,10 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
 		radeon_encoder = to_radeon_encoder(encoder);
 
 		switch (radeon_encoder->encoder_id) {
@@ -1419,9 +1521,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
 	return ENCODER_OBJECT_ID_NONE;
 }
 
-bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
 {
-	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
 	struct radeon_encoder *radeon_encoder;
 	int i;
@@ -1431,11 +1532,10 @@ bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
 		if (connector->encoder_ids[i] == 0)
 			break;
 
-		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
 			continue;
 
-		encoder = obj_to_encoder(obj);
 		radeon_encoder = to_radeon_encoder(encoder);
 		if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
 			found = true;
@@ -1478,10 +1578,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 		goto out;
 	}
 
-	if (radeon_connector->edid) {
-		kfree(radeon_connector->edid);
-		radeon_connector->edid = NULL;
-	}
+	radeon_connector_free_edid(connector);
 
 	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
 	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
@@ -1587,7 +1684,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
 	    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
 		return radeon_dp_mode_valid_helper(connector, mode);
 	} else {
-		if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+		if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
 			/* HDMI 1.3+ supports max clock of 340 Mhz */
 			if (mode->clock > 340000)
 				return MODE_CLOCK_HIGH;
@@ -1747,6 +1844,9 @@ radeon_add_atom_connector(struct drm_device *dev,
 		drm_object_attach_property(&radeon_connector->base.base,
 					   rdev->mode_info.load_detect_property,
 					   1);
+		drm_object_attach_property(&radeon_connector->base.base,
+					   dev->mode_config.scaling_mode_property,
+					   DRM_MODE_SCALE_NONE);
 		break;
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_DVID:
@@ -1768,6 +1868,10 @@ radeon_add_atom_connector(struct drm_device *dev,
 					      0);
 
 		drm_object_attach_property(&radeon_connector->base.base,
+					   dev->mode_config.scaling_mode_property,
+					   DRM_MODE_SCALE_NONE);
+
+		drm_object_attach_property(&radeon_connector->base.base,
 					   rdev->mode_info.dither_property,
 					   RADEON_FMT_DITHER_DISABLE);
 
@@ -1817,6 +1921,10 @@ radeon_add_atom_connector(struct drm_device *dev,
 		drm_object_attach_property(&radeon_connector->base.base,
 					   rdev->mode_info.load_detect_property,
 					   1);
+		if (ASIC_IS_AVIVO(rdev))
+			drm_object_attach_property(&radeon_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
 		/* no HPD on analog connectors */
 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
 		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -1835,6 +1943,10 @@ radeon_add_atom_connector(struct drm_device *dev,
 		drm_object_attach_property(&radeon_connector->base.base,
 					   rdev->mode_info.load_detect_property,
 					   1);
+		if (ASIC_IS_AVIVO(rdev))
+			drm_object_attach_property(&radeon_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
 		/* no HPD on analog connectors */
 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
 		connector->interlace_allowed = true;
@@ -1868,17 +1980,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.underscan_vborder_property,
 						   0);
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.dither_property,
+						   RADEON_FMT_DITHER_DISABLE);
+			drm_object_attach_property(&radeon_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
 		}
 		if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.audio_property,
 						   RADEON_AUDIO_AUTO);
 		}
-		if (ASIC_IS_AVIVO(rdev)) {
-			drm_object_attach_property(&radeon_connector->base.base,
-						   rdev->mode_info.dither_property,
-						   RADEON_FMT_DITHER_DISABLE);
-		}
 		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 			radeon_connector->dac_load_detect = true;
 			drm_object_attach_property(&radeon_connector->base.base,
@@ -1918,17 +2031,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.underscan_vborder_property,
 						   0);
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.dither_property,
+						   RADEON_FMT_DITHER_DISABLE);
+			drm_object_attach_property(&radeon_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
 		}
 		if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.audio_property,
 						   RADEON_AUDIO_AUTO);
 		}
-		if (ASIC_IS_AVIVO(rdev)) {
-			drm_object_attach_property(&radeon_connector->base.base,
-						   rdev->mode_info.dither_property,
-						   RADEON_FMT_DITHER_DISABLE);
-		}
 		subpixel_order = SubPixelHorizontalRGB;
 		connector->interlace_allowed = true;
 		if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1965,18 +2079,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.underscan_vborder_property,
 						   0);
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.dither_property,
+						   RADEON_FMT_DITHER_DISABLE);
+			drm_object_attach_property(&radeon_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
 		}
 		if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.audio_property,
 						   RADEON_AUDIO_AUTO);
 		}
-		if (ASIC_IS_AVIVO(rdev)) {
-			drm_object_attach_property(&radeon_connector->base.base,
-						   rdev->mode_info.dither_property,
-						   RADEON_FMT_DITHER_DISABLE);
-
-		}
 		connector->interlace_allowed = true;
 		/* in theory with a DP to VGA converter... */
 		connector->doublescan_allowed = false;
@@ -2050,7 +2164,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	connector->display_info.subpixel_order = subpixel_order;
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 
 	if (has_aux)
 		radeon_dp_aux_init(radeon_connector);
@@ -2211,5 +2325,5 @@ radeon_add_legacy_connector(struct drm_device *dev,
 	} else
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
 	connector->display_info.subpixel_order = subpixel_order;
-	drm_sysfs_connector_add(connector);
+	drm_connector_register(connector);
 }
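[ Ed. drm_sysfs_connector_add()/drm_sysfs_connector_remove() were renamed
  drm_connector_register()/drm_connector_unregister() in this cycle's core
  cleanups; the connector hunks above are the mechanical fallout. The expected
  ordering is unchanged -- a sketch: ]

	drm_connector_init(dev, connector, &radeon_lvds_connector_funcs,
			   connector_type);
	drm_connector_helper_add(connector, &helper_funcs);
	/* ... attach properties ... */
	drm_connector_register(connector);	/* creates the sysfs node */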
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ae763f60c8a0..ee712c199b25 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -500,7 +500,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 		if (r)
 			return r;
 	}
-	return 0;
+
+	return radeon_vm_clear_invalids(rdev, vm);
 }
 
 static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
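[ Ed. radeon_bo_vm_update_pte() now finishes by flushing the VM's
  invalidated-BO list instead of returning 0 unconditionally, so page-table
  entries for BOs that moved or were evicted are cleared before the command
  stream runs; the error code simply propagates through the existing CS ioctl
  return path, so no call-site changes are needed. ]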
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 697add2cd4e3..c8ea050c8fa4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,31 @@ static const char radeon_family_name[][16] = {
103 "LAST", 103 "LAST",
104}; 104};
105 105
106#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
107#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
108
109struct radeon_px_quirk {
110 u32 chip_vendor;
111 u32 chip_device;
112 u32 subsys_vendor;
113 u32 subsys_device;
114 u32 px_quirk_flags;
115};
116
117static struct radeon_px_quirk radeon_px_quirk_list[] = {
118 /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
119 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
120 */
121 { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
122 /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
123 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
124 */
125 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
126 /* macbook pro 8.2 */
127 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
128 { 0, 0, 0, 0, 0 },
129};
130
106bool radeon_is_px(struct drm_device *dev) 131bool radeon_is_px(struct drm_device *dev)
107{ 132{
108 struct radeon_device *rdev = dev->dev_private; 133 struct radeon_device *rdev = dev->dev_private;
@@ -112,6 +137,26 @@ bool radeon_is_px(struct drm_device *dev)
 	return false;
 }
 
+static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
+{
+	struct radeon_px_quirk *p = radeon_px_quirk_list;
+
+	/* Apply PX quirks */
+	while (p && p->chip_device != 0) {
+		if (rdev->pdev->vendor == p->chip_vendor &&
+		    rdev->pdev->device == p->chip_device &&
+		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+		    rdev->pdev->subsystem_device == p->subsys_device) {
+			rdev->px_quirk_flags = p->px_quirk_flags;
+			break;
+		}
+		++p;
+	}
+
+	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
+		rdev->flags &= ~RADEON_IS_PX;
+}
+
 /**
  * radeon_program_register_sequence - program an array of registers.
  *
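[ Ed. The quirk table plus radeon_device_handle_px_quirks() above replace the
  one-off macbook check deleted further down, so marking a broken PX system
  becomes a one-line table edit. Hypothetical example entry -- the device and
  subsystem ids here are invented for illustration: ]

	/* example laptop that hangs with runtime PX enabled */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x17aa, 0x3970, RADEON_PX_QUIRK_DISABLE_PX },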
@@ -385,7 +430,8 @@ int radeon_wb_init(struct radeon_device *rdev)
 
 	if (rdev->wb.wb_obj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
+				     RADEON_GEM_DOMAIN_GTT, 0, NULL,
+				     &rdev->wb.wb_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 			return r;
@@ -1077,7 +1123,19 @@ static void radeon_check_arguments(struct radeon_device *rdev)
 	/* defines number of bits in page table versus page directory,
 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 	 * page table and the remaining bits are in the page directory */
-	if (radeon_vm_block_size < 9) {
+	if (radeon_vm_block_size == -1) {
+
+		/* Total bits covered by PD + PTs */
+		unsigned bits = ilog2(radeon_vm_size) + 18;
+
+		/* Make sure the PD is 4K in size up to 8GB address space.
+		   Above that split equally between PD and PTs */
+		if (radeon_vm_size <= 8)
+			radeon_vm_block_size = bits - 9;
+		else
+			radeon_vm_block_size = (bits + 3) / 2;
+
+	} else if (radeon_vm_block_size < 9) {
 		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
 			 radeon_vm_block_size);
 		radeon_vm_block_size = 9;
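[ Ed. Worked example for the new default above, with radeon_vm_size in GB as
  the surrounding check assumes: an 8GB VM space spans 2^33 bytes / 4KB pages
  = 2^21 pages, so bits = ilog2(8) + 18 = 21 and the page table keeps
  21 - 9 = 12 bits, leaving a 512-entry (4K) page directory; a 64GB space
  gives bits = 24, split as (24 + 3) / 2 = 13 page-table bits and the
  remaining 11 page-directory bits. ]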
@@ -1092,25 +1150,6 @@ static void radeon_check_arguments(struct radeon_device *rdev)
 }
 
 /**
- * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
- * needed for waking up.
- *
- * @pdev: pci dev pointer
- */
-static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
-{
-
-	/* 6600m in a macbook pro */
-	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
-	    pdev->subsystem_device == 0x00e2) {
-		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
-		return true;
-	}
-
-	return false;
-}
-
-/**
  * radeon_switcheroo_set_state - set switcheroo state
  *
  * @pdev: pci dev pointer
@@ -1122,6 +1161,7 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct radeon_device *rdev = dev->dev_private;
 
 	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
 		return;
@@ -1133,7 +1173,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
 			dev->pdev->d3_delay = 20;
 
 		radeon_resume_kms(dev, true, true);
@@ -1337,6 +1377,9 @@ int radeon_device_init(struct radeon_device *rdev,
 	if (rdev->rio_mem == NULL)
 		DRM_ERROR("Unable to find PCI I/O BAR\n");
 
+	if (rdev->flags & RADEON_IS_PX)
+		radeon_device_handle_px_quirks(rdev);
+
 	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
 	/* this will fail for cards that aren't VGA class devices, just
 	 * ignore it */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bf25061c8ac4..3fdf87318069 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -293,6 +293,18 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
 	if (radeon_crtc == NULL)
 		return;
 
+	/* Skip the pageflip completion check below (based on polling) on
+	 * asics which reliably support hw pageflip completion irqs. pflip
+	 * irqs are a reliable and race-free method of handling pageflip
+	 * completion detection. A use_pflipirq module parameter < 2 allows
+	 * to override this in case of asics with faulty pflip irqs.
+	 * A module parameter of 0 would only use this polling based path,
+	 * a parameter of 1 would use pflip irq only as a backup to this
+	 * path, as in Linux 3.16.
+	 */
+	if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
+		return;
+
 	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
 	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
 		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
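[ Ed. Net effect of the check above: DCE4+ parts now trust the pageflip
  completion interrupt by default. Per the comment, booting with
  radeon.use_pflipirq=1 restores the Linux 3.16 behaviour (irq as a backup to
  this polling path) and use_pflipirq=0 forces polling only. ]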
@@ -823,64 +835,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
	return ret;
 }

-int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
-{
-	struct drm_device *dev = radeon_connector->base.dev;
-	struct radeon_device *rdev = dev->dev_private;
-	int ret = 0;
-
-	/* don't leak the edid if we already fetched it in detect() */
-	if (radeon_connector->edid)
-		goto got_edid;
-
-	/* on hw with routers, select right port */
-	if (radeon_connector->router.ddc_valid)
-		radeon_router_select_ddc_port(radeon_connector);
-
-	if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
-	    ENCODER_OBJECT_ID_NONE) {
-		if (radeon_connector->ddc_bus->has_aux)
-			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
-							      &radeon_connector->ddc_bus->aux.ddc);
-	} else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
-		   (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
-		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
-
-		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
-		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
-		    radeon_connector->ddc_bus->has_aux)
-			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
-							      &radeon_connector->ddc_bus->aux.ddc);
-		else if (radeon_connector->ddc_bus && !radeon_connector->edid)
-			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
-							      &radeon_connector->ddc_bus->adapter);
-	} else {
-		if (radeon_connector->ddc_bus && !radeon_connector->edid)
-			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
-							      &radeon_connector->ddc_bus->adapter);
-	}
-
-	if (!radeon_connector->edid) {
-		if (rdev->is_atom_bios) {
-			/* some laptops provide a hardcoded edid in rom for LCDs */
-			if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
-			     (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
-				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
-		} else
-			/* some servers provide a hardcoded edid in rom for KVMs */
-			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
-	}
-	if (radeon_connector->edid) {
-got_edid:
-		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
-		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
-		drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
-		return ret;
-	}
-	drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
-	return 0;
-}
-
 /* avivo */

 /**
@@ -1749,7 +1703,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
	    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
	    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
	     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
-	      drm_detect_hdmi_monitor(radeon_connector->edid) &&
+	      drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
	      is_hdtv_mode(mode)))) {
		if (radeon_encoder->underscan_hborder != 0)
			radeon_crtc->h_border = radeon_encoder->underscan_hborder;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 959f0866d993..092d067f93e1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -82,9 +82,11 @@
  * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
  *          CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
  * 2.39.0 - Add INFO query for number of active CUs
+ * 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
+ *          CS to GPU
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	39
+#define KMS_DRIVER_MINOR	40
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -174,9 +176,10 @@ int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
 int radeon_hard_reset = 0;
-int radeon_vm_size = 4;
-int radeon_vm_block_size = 9;
+int radeon_vm_size = 8;
+int radeon_vm_block_size = -1;
 int radeon_deep_color = 0;
+int radeon_use_pflipirq = 2;

 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -247,12 +250,15 @@ module_param_named(hard_reset, radeon_hard_reset, int, 0444);
 MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
 module_param_named(vm_size, radeon_vm_size, int, 0444);

-MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
+MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
 module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);

 MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
 module_param_named(deep_color, radeon_deep_color, int, 0444);

+MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
+module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS
 };
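[ Ed. note: three defaults change here: vm_size doubles to 8 GB,
  vm_block_size becomes self-selecting (-1), and the new use_pflipirq chooses
  how pageflip completion is detected (0 = polling only, 1 = pflip irq as a
  backup to polling as in 3.16, 2 = irq-only on DCE4+, the default); e.g.
  booting with radeon.use_pflipirq=1 restores the old behaviour. A
  hypothetical sketch of deriving the block size when vm_block_size is left
  at -1 — the helper name and thresholds are assumptions, the real
  derivation lives elsewhere in this pull:

	static int radeon_default_vm_block_size(int vm_size_gb)
	{
		/* 9 bits (512 entries per page table) matches the old fixed
		 * default; larger address spaces get bigger blocks so the
		 * page directory stays a reasonable size */
		if (vm_size_gb <= 8)
			return 9;
		return 10;
	} ]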
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index bd4959ca23aa..3c2094c25b53 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -343,7 +343,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
	case DRM_MODE_CONNECTOR_HDMIB:
		if (radeon_connector->use_digital) {
			/* HDMI 1.3 supports up to 340 Mhz over single link */
-			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
				if (pixel_clock > 340000)
					return true;
				else
@@ -365,7 +365,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
			return false;
		else {
			/* HDMI 1.3 supports up to 340 Mhz over single link */
-			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
				if (pixel_clock > 340000)
					return true;
				else
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 665ced3b7313..94b0f2aa3d7c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -127,8 +127,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = radeon_gem_object_create(rdev, aligned_size, 0,
				       RADEON_GEM_DOMAIN_VRAM,
-				       false, true,
-				       &gobj);
+				       0, true, &gobj);
	if (ret) {
		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
		       aligned_size);
@@ -331,7 +330,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
	return 0;
 }

-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
	.gamma_set = radeon_crtc_fb_gamma_set,
	.gamma_get = radeon_crtc_fb_gamma_get,
	.fb_probe = radeonfb_create,
@@ -353,7 +352,9 @@ int radeon_fbdev_init(struct radeon_device *rdev)

	rfbdev->rdev = rdev;
	rdev->mode_info.rfbdev = rfbdev;
-	rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+	drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
+			      &radeon_fb_helper_funcs);

	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
				 rdev->num_crtc,
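[ Ed. note: this adopts the new drm_fb_helper_prepare() entry point from the
  DRM core changes in this pull: the (now const) helper funcs are bound
  before drm_fb_helper_init() rather than by open-coded assignment. The
  resulting call order, sketched with shortened argument names:

	drm_fb_helper_prepare(dev, helper, &radeon_fb_helper_funcs);
	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
	if (ret)
		goto free; ]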
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 2e723651069b..a053a0779aac 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     NULL, &rdev->gart.robj);
+				     0, NULL, &rdev->gart.robj);
		if (r) {
			return r;
		}
@@ -243,7 +243,8 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
-					radeon_gart_set_page(rdev, t, page_base);
+					radeon_gart_set_page(rdev, t, page_base,
+							     RADEON_GART_PAGE_DUMMY);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
@@ -261,13 +262,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
  * @pages: number of pages to bind
  * @pagelist: pages to bind
  * @dma_addr: DMA addresses of pages
+ * @flags: RADEON_GART_PAGE_* flags
  *
  * Binds the requested pages to the gart page table
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
+		     uint32_t flags)
 {
	unsigned t;
	unsigned p;
@@ -287,7 +290,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
	if (rdev->gart.ptr) {
		page_base = rdev->gart.pages_addr[p];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-			radeon_gart_set_page(rdev, t, page_base);
+			radeon_gart_set_page(rdev, t, page_base, flags);
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
@@ -298,33 +301,6 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 }

 /**
- * radeon_gart_restore - bind all pages in the gart page table
- *
- * @rdev: radeon_device pointer
- *
- * Binds all pages in the gart page table (all asics).
- * Used to rebuild the gart table on device startup or resume.
- */
-void radeon_gart_restore(struct radeon_device *rdev)
-{
-	int i, j, t;
-	u64 page_base;
-
-	if (!rdev->gart.ptr) {
-		return;
-	}
-	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
-		page_base = rdev->gart.pages_addr[i];
-		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-			radeon_gart_set_page(rdev, t, page_base);
-			page_base += RADEON_GPU_PAGE_SIZE;
-		}
-	}
-	mb();
-	radeon_gart_tlb_flush(rdev);
-}
-
-/**
  * radeon_gart_init - init the driver info for managing the gart
  *
  * @rdev: radeon_device pointer
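[ Ed. note: a sketch of a caller after this interface change — the TTM
  backend can now pass per-binding cache attributes through to the GART.
  The RADEON_GART_PAGE_* names other than _DUMMY are assumptions here; only
  _DUMMY appears in this hunk:

	/* snoop CPU caches only for cacheable ttm pages */
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
			 RADEON_GART_PAGE_WRITE;

	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r)
		return r; ]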
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d09650c1d720..bfd7e1b0ff3f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -40,9 +40,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
	}
 }

-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
-			     bool discardable, bool kernel,
+			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
 {
	struct radeon_bo *robj;
@@ -55,23 +55,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
		alignment = PAGE_SIZE;
	}

-	/* maximun bo size is the minimun btw visible vram and gtt size */
-	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	/* Maximum bo size is the unpinned gtt size since we use the gtt to
+	 * handle vram to system pool migrations.
+	 */
+	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
-		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
-		       __func__, __LINE__, size >> 20, max_size >> 20);
+		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
+			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

 retry:
-	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
+			     flags, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
-			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
@@ -208,18 +211,15 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
-	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
-	if (rdev->stollen_vga_memory)
-		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
-	args->vram_visible -= radeon_fbdev_total_size(rdev);
-	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
-	for(i = 0; i < RADEON_NUM_RINGS; ++i)
-		args->gart_size -= rdev->ring[i].ring_size;
+	args->vram_visible -= rdev->vram_pin_size;
+	args->gart_size = rdev->mc.gtt_size;
+	args->gart_size -= rdev->gart_pin_size;
+
	return 0;
 }

@@ -252,8 +252,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
-				     args->initial_domain, false,
+				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
@@ -358,16 +358,18 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
+	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
-	r = radeon_bo_wait(robj, NULL, false);
-	/* callback hw specific functions if any */
-	if (rdev->asic->ioctl_wait_idle)
-		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+	r = radeon_bo_wait(robj, &cur_placement, false);
+	/* Flush HDP cache via MMIO if necessary */
+	if (rdev->asic->mmio_hdp_flush &&
+	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
@@ -461,11 +463,6 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
-	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
-		dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
-		args->operation = RADEON_VA_RESULT_ERROR;
-		return -EINVAL;
-	}

	switch (args->operation) {
	case RADEON_VA_MAP:
@@ -499,9 +496,9 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,

	switch (args->operation) {
	case RADEON_VA_MAP:
-		if (bo_va->soffset) {
+		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
-			args->offset = bo_va->soffset;
+			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -572,9 +569,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
-				     RADEON_GEM_DOMAIN_VRAM,
-				     false, ttm_bo_type_device,
-				     &gobj);
+				     RADEON_GEM_DOMAIN_VRAM, 0,
+				     false, &gobj);
	if (r)
		return -ENOMEM;

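[ Ed. note: with the new flags argument, callers can request write-combined
  or uncached GTT placement at allocation time. A hedged kernel-side sketch
  using only symbols visible in this pull:

	struct drm_gem_object *gobj;
	int r;

	/* CPU-write-once, GPU-read-many: write-combined GTT is a good fit */
	r = radeon_gem_object_create(rdev, size, PAGE_SIZE,
				     RADEON_GEM_DOMAIN_GTT,
				     RADEON_GEM_GTT_WC, false, &gobj);
	if (r)
		return r; ]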
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
new file mode 100644
index 000000000000..65b0c213488d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ *          Christian König
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU accessible memory where
+ * commands are stored. You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them. Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @size: requested IB size
+ *
+ * Request an IB (all asics). IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib *ib, struct radeon_vm *vm,
+		  unsigned size)
+{
+	int r;
+
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
+	if (r) {
+		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+		return r;
+	}
+
+	r = radeon_semaphore_create(rdev, &ib->semaphore);
+	if (r) {
+		return r;
+	}
+
+	ib->ring = ring;
+	ib->fence = NULL;
+	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+	ib->vm = vm;
+	if (vm) {
+		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+		 * space and soffset is the offset inside the pool bo
+		 */
+		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+	} else {
+		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+	}
+	ib->is_const_ib = false;
+
+	return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+	radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine). Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed. To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE. If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+		       struct radeon_ib *const_ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	int r = 0;
+
+	if (!ib->length_dw || !ring->ready) {
+		/* TODO: Nothing in the IB we should report. */
+		dev_err(rdev->dev, "couldn't schedule ib\n");
+		return -EINVAL;
+	}
+
+	/* 64 dwords should be enough for fence too */
+	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
+	if (r) {
+		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+		return r;
+	}
+
+	/* grab a vm id if necessary */
+	if (ib->vm) {
+		struct radeon_fence *vm_id_fence;
+		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
+		radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
+	}
+
+	/* sync with other rings */
+	r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	if (ib->vm)
+		radeon_vm_flush(rdev, ib->vm, ib->ring);
+
+	if (const_ib) {
+		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+	}
+	radeon_ring_ib_execute(rdev, ib->ring, ib);
+	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+	if (const_ib) {
+		const_ib->fence = radeon_fence_ref(ib->fence);
+	}
+
+	if (ib->vm)
+		radeon_vm_fence(rdev, ib->vm, ib->fence);
+
+	radeon_ring_unlock_commit(rdev, ring);
+	return 0;
+}
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->ib_pool_ready) {
+		return 0;
+	}
+
+	if (rdev->family >= CHIP_BONAIRE) {
+		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+					      RADEON_IB_POOL_SIZE*64*1024,
+					      RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_GTT,
+					      RADEON_GEM_GTT_WC);
+	} else {
+		/* Before CIK, it's better to stick to cacheable GTT due
+		 * to the command stream checking
+		 */
+		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+					      RADEON_IB_POOL_SIZE*64*1024,
+					      RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_GTT, 0);
+	}
+	if (r) {
+		return r;
+	}
+
+	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+	if (r) {
+		return r;
+	}
+
+	rdev->ib_pool_ready = true;
+	if (radeon_debugfs_sa_init(rdev)) {
+		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+	}
+	return 0;
+}
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+	if (rdev->ib_pool_ready) {
+		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+		rdev->ib_pool_ready = false;
+	}
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+	unsigned i;
+	int r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_ring *ring = &rdev->ring[i];
+
+		if (!ring->ready)
+			continue;
+
+		r = radeon_ib_test(rdev, i, ring);
+		if (r) {
+			ring->ready = false;
+			rdev->needs_reset = false;
+
+			if (i == RADEON_RING_TYPE_GFX_INDEX) {
+				/* oh, oh, that's really bad */
+				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+				rdev->accel_working = false;
+				return r;
+
+			} else {
+				/* still not good, but we can live with it */
+				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+	return 0;
+
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+	return 0;
+#endif
+}
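[ Ed. note: the new file is a straight move of the IB handling out of
  radeon_ring.c (see the matching removal below). The lifecycle of the API
  it exports, sketched from the functions above — ring_idx, vm and the IB
  size here are caller-supplied:

	struct radeon_ib ib;
	int r;

	r = radeon_ib_get(rdev, ring_idx, &ib, vm, 64 * 1024);
	if (r)
		return r;
	/* ... write command dwords to ib.ptr and set ib.length_dw ... */
	r = radeon_ib_schedule(rdev, &ib, NULL);	/* no CONST_IB */
	/* ib.fence is now valid; freeing returns the suballocation */
	radeon_ib_free(rdev, &ib); ]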
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d25ae6acfd5a..eb7164d07985 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -254,7 +254,18 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
-		*value = rdev->accel_working;
+		if (rdev->family == CHIP_HAWAII) {
+			if (rdev->accel_working) {
+				if (rdev->new_fw)
+					*value = 3;
+				else
+					*value = 2;
+			} else {
+				*value = 0;
+			}
+		} else {
+			*value = rdev->accel_working;
+		}
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
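[ Ed. note: a userspace-visible semantics change: on Hawaii the query now
  reports firmware generations instead of a plain boolean, so userspace can
  gate features on it. Hedged interpretation on the consumer side — the
  fallback helper is hypothetical:

	/* value returned by RADEON_INFO_ACCEL_WORKING2 on Hawaii:
	 *   0 - acceleration not working
	 *   2 - working, but only the old firmware format is loaded
	 *   3 - working with the new firmware format
	 * other families keep the old 0/1 boolean */
	if (is_hawaii && value < 3)
		limit_acceleration_features();	/* hypothetical */ ]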
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 0592ddb0904b..e27608c29c11 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -685,10 +685,11 @@ extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,

 extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
 extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
-extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
 extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
 extern int radeon_get_monitor_bpc(struct drm_connector *connector);

+extern struct edid *radeon_connector_edid(struct drm_connector *connector);
+
 extern void radeon_connector_hotplug(struct drm_connector *connector);
 extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
				       struct drm_display_mode *mode);
@@ -738,7 +739,6 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
 extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
 extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
 extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
-extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);

 extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);

diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6c717b257d6d..480c87d8edc5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,16 +46,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
  * function are calling it.
  */

-static void radeon_bo_clear_va(struct radeon_bo *bo)
-{
-	struct radeon_bo_va *bo_va, *tmp;
-
-	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
-		/* remove from all vm address space */
-		radeon_vm_bo_rmv(bo->rdev, bo_va);
-	}
-}
-
 static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
 {
@@ -90,7 +80,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
-	radeon_bo_clear_va(bo);
+	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
 }
@@ -114,15 +104,23 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
-		if (rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+		if (rbo->flags & RADEON_GEM_GTT_UC) {
+			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+			   (rbo->rdev->flags & RADEON_IS_AGP)) {
+			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
-		if (rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+		if (rbo->flags & RADEON_GEM_GTT_UC) {
+			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+			   rbo->rdev->flags & RADEON_IS_AGP) {
+			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
@@ -146,7 +144,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)

 int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
-		     struct sg_table *sg, struct radeon_bo **bo_ptr)
+		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
 {
	struct radeon_bo *bo;
	enum ttm_bo_type type;
@@ -183,6 +181,12 @@ int radeon_bo_create(struct radeon_device *rdev,
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
	                               RADEON_GEM_DOMAIN_GTT |
	                               RADEON_GEM_DOMAIN_CPU);
+
+	bo->flags = flags;
+	/* PCI GART is always snooped */
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocation are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
@@ -232,6 +236,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
	ttm_bo_kunmap(&bo->kmap);
 }

+struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
+{
+	if (bo == NULL)
+		return NULL;
+
+	ttm_bo_reference(&bo->tbo);
+	return bo;
+}
+
 void radeon_bo_unref(struct radeon_bo **bo)
 {
	struct ttm_buffer_object *tbo;
@@ -241,9 +254,7 @@ void radeon_bo_unref(struct radeon_bo **bo)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
-	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
-	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
 }
@@ -292,9 +303,13 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
-	}
-	if (unlikely(r != 0))
+		if (domain == RADEON_GEM_DOMAIN_VRAM)
+			bo->rdev->vram_pin_size += radeon_bo_size(bo);
+		else
+			bo->rdev->gart_pin_size += radeon_bo_size(bo);
+	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+	}
	return r;
 }

@@ -317,8 +332,14 @@ int radeon_bo_unpin(struct radeon_bo *bo)
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-	if (unlikely(r != 0))
+	if (likely(r == 0)) {
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
+		else
+			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
+	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+	}
	return r;
 }

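[ Ed. note: radeon_bo_ref() added above is the NULL-safe take-a-reference
  counterpart to the existing radeon_bo_unref(). Typical pairing, sketched:

	struct radeon_bo *ref = radeon_bo_ref(bo);	/* NULL in, NULL out */

	/* ... hand "ref" to work that may outlive the caller ... */

	radeon_bo_unref(&ref);	/* drops the reference, clears the pointer */ ]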
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 5a873f31a171..98a47fdf3625 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -124,11 +124,12 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,

 extern int radeon_bo_create(struct radeon_device *rdev,
			    unsigned long size, int byte_align,
-			    bool kernel, u32 domain,
+			    bool kernel, u32 domain, u32 flags,
			    struct sg_table *sg,
			    struct radeon_bo **bo_ptr);
 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
 extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
 extern void radeon_bo_unref(struct radeon_bo **bo);
 extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
 extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
@@ -170,7 +171,8 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)

 extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
				     struct radeon_sa_manager *sa_manager,
-				     unsigned size, u32 align, u32 domain);
+				     unsigned size, u32 align, u32 domain,
+				     u32 flags);
 extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
				      struct radeon_sa_manager *sa_manager);
 extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index e447e390d09a..23314be49480 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1303,10 +1303,6 @@ int radeon_pm_init(struct radeon_device *rdev)
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
-	case CHIP_BARTS:
-	case CHIP_TURKS:
-	case CHIP_CAICOS:
-	case CHIP_CAYMAN:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1330,6 +1326,10 @@ int radeon_pm_init(struct radeon_device *rdev)
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
@@ -1400,9 +1400,7 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
	}

	radeon_hwmon_fini(rdev);
-
-	if (rdev->pm.power_state)
-		kfree(rdev->pm.power_state);
+	kfree(rdev->pm.power_state);
 }

 static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1421,9 +1419,7 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
	radeon_dpm_fini(rdev);

	radeon_hwmon_fini(rdev);
-
-	if (rdev->pm.power_state)
-		kfree(rdev->pm.power_state);
+	kfree(rdev->pm.power_state);
 }

 void radeon_pm_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 28d71070c389..0b16f2cbcf17 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -65,7 +65,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
	int ret;

	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
-			       RADEON_GEM_DOMAIN_GTT, sg, &bo);
+			       RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
	if (ret)
		return ERR_PTR(ret);

diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f8050f5429e2..5b4e0cf231a0 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -26,258 +26,8 @@
  * Jerome Glisse
  * Christian König
  */
-#include <linux/seq_file.h>
-#include <linux/slab.h>
 #include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon_reg.h"
 #include "radeon.h"
-#include "atom.h"
-
-/*
- * IB
- * IBs (Indirect Buffers) and areas of GPU accessible memory where
- * commands are stored. You can put a pointer to the IB in the
- * command ring and the hw will fetch the commands from the IB
- * and execute them. Generally userspace acceleration drivers
- * produce command buffers which are send to the kernel and
- * put in IBs for execution by the requested ring.
- */
-static int radeon_debugfs_sa_init(struct radeon_device *rdev);
-
-/**
- * radeon_ib_get - request an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ring: ring index the IB is associated with
- * @ib: IB object returned
- * @size: requested IB size
- *
- * Request an IB (all asics). IBs are allocated using the
- * suballocator.
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_get(struct radeon_device *rdev, int ring,
-		  struct radeon_ib *ib, struct radeon_vm *vm,
-		  unsigned size)
-{
-	int r;
-
-	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
-	if (r) {
-		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
-		return r;
-	}
-
-	r = radeon_semaphore_create(rdev, &ib->semaphore);
-	if (r) {
-		return r;
-	}
-
-	ib->ring = ring;
-	ib->fence = NULL;
-	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
-	ib->vm = vm;
-	if (vm) {
-		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
-		 * space and soffset is the offset inside the pool bo
-		 */
-		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
-	} else {
-		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
-	}
-	ib->is_const_ib = false;
-
-	return 0;
-}
-
-/**
- * radeon_ib_free - free an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to free
- *
- * Free an IB (all asics).
- */
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
-	radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
-	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
-	radeon_fence_unref(&ib->fence);
-}
-
-/**
- * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- * @const_ib: Const IB to schedule (SI only)
- *
- * Schedule an IB on the associated ring (all asics).
- * Returns 0 on success, error on failure.
- *
- * On SI, there are two parallel engines fed from the primary ring,
- * the CE (Constant Engine) and the DE (Drawing Engine). Since
- * resource descriptors have moved to memory, the CE allows you to
- * prime the caches while the DE is updating register state so that
- * the resource descriptors will be already in cache when the draw is
- * processed. To accomplish this, the userspace driver submits two
- * IBs, one for the CE and one for the DE. If there is a CE IB (called
- * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
- * to SI there was just a DE IB.
- */
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-		       struct radeon_ib *const_ib)
-{
-	struct radeon_ring *ring = &rdev->ring[ib->ring];
-	int r = 0;
-
-	if (!ib->length_dw || !ring->ready) {
-		/* TODO: Nothings in the ib we should report. */
-		dev_err(rdev->dev, "couldn't schedule ib\n");
-		return -EINVAL;
-	}
-
-	/* 64 dwords should be enough for fence too */
-	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
-	if (r) {
-		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
-		return r;
-	}
-
-	/* grab a vm id if necessary */
-	if (ib->vm) {
-		struct radeon_fence *vm_id_fence;
-		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
-		radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
-	}
-
-	/* sync with other rings */
-	r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
-	if (r) {
-		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
-		radeon_ring_unlock_undo(rdev, ring);
-		return r;
-	}
-
-	if (ib->vm)
-		radeon_vm_flush(rdev, ib->vm, ib->ring);
-
-	if (const_ib) {
-		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
-		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
-	}
-	radeon_ring_ib_execute(rdev, ib->ring, ib);
-	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
-	if (r) {
-		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
-		radeon_ring_unlock_undo(rdev, ring);
-		return r;
-	}
-	if (const_ib) {
-		const_ib->fence = radeon_fence_ref(ib->fence);
-	}
-
-	if (ib->vm)
-		radeon_vm_fence(rdev, ib->vm, ib->fence);
-
-	radeon_ring_unlock_commit(rdev, ring);
-	return 0;
-}
-
-/**
- * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Initialize the suballocator to manage a pool of memory
- * for use as IBs (all asics).
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_pool_init(struct radeon_device *rdev)
-{
-	int r;
-
-	if (rdev->ib_pool_ready) {
-		return 0;
-	}
-	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
-				      RADEON_IB_POOL_SIZE*64*1024,
-				      RADEON_GPU_PAGE_SIZE,
-				      RADEON_GEM_DOMAIN_GTT);
-	if (r) {
-		return r;
-	}
-
-	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
-	if (r) {
-		return r;
-	}
-
-	rdev->ib_pool_ready = true;
-	if (radeon_debugfs_sa_init(rdev)) {
-		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
-	}
-	return 0;
-}
-
-/**
- * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Tear down the suballocator managing the pool of memory
- * for use as IBs (all asics).
- */
-void radeon_ib_pool_fini(struct radeon_device *rdev)
-{
-	if (rdev->ib_pool_ready) {
-		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
-		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
-		rdev->ib_pool_ready = false;
-	}
-}
-
-/**
- * radeon_ib_ring_tests - test IBs on the rings
- *
- * @rdev: radeon_device pointer
- *
- * Test an IB (Indirect Buffer) on each ring.
- * If the test fails, disable the ring.
- * Returns 0 on success, error if the primary GFX ring
- * IB test fails.
- */
-int radeon_ib_ring_tests(struct radeon_device *rdev)
-{
-	unsigned i;
-	int r;
-
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		struct radeon_ring *ring = &rdev->ring[i];
-
-		if (!ring->ready)
-			continue;
-
-		r = radeon_ib_test(rdev, i, ring);
-		if (r) {
-			ring->ready = false;
-			rdev->needs_reset = false;
-
-			if (i == RADEON_RING_TYPE_GFX_INDEX) {
-				/* oh, oh, that's really bad */
-				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
-				rdev->accel_working = false;
-				return r;
-
-			} else {
-				/* still not good, but we can live with it */
-				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
-			}
-		}
-	}
-	return 0;
-}
-

 /*
  * Rings
@@ -433,11 +183,21 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
  */
 void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 {
+	/* If we are emitting the HDP flush via the ring buffer, we need to
+	 * do it before padding.
+	 */
+	if (rdev->asic->ring[ring->idx]->hdp_flush)
+		rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
	/* We pad to match fetch size */
	while (ring->wptr & ring->align_mask) {
		radeon_ring_write(ring, ring->nop);
	}
	mb();
+	/* If we are emitting the HDP flush via MMIO, we need to do it after
+	 * all CPU writes to VRAM finished.
+	 */
+	if (rdev->asic->mmio_hdp_flush)
+		rdev->asic->mmio_hdp_flush(rdev);
	radeon_ring_set_wptr(rdev, ring);
 }

@@ -641,6 +401,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
+				     (rdev->flags & RADEON_IS_PCIE) ?
+				     RADEON_GEM_GTT_WC : 0,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
@@ -791,22 +553,6 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
 };

-static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct radeon_device *rdev = dev->dev_private;
-
-	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
-
-	return 0;
-
-}
-
-static struct drm_info_list radeon_debugfs_sa_list[] = {
-	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
-};
-
 #endif

 static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
@@ -828,12 +574,3 @@ static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ri
 #endif
	return 0;
 }
-
-static int radeon_debugfs_sa_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
-	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
-#else
-	return 0;
-#endif
-}
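[ Ed. note: after this change callers of the ring API need no explicit HDP
  cache flushing of their own: radeon_ring_commit() emits the ring-based
  flush before padding and the MMIO-based flush after the mb(). The caller
  pattern is unchanged, sketched with the packet contents elided:

	r = radeon_ring_lock(rdev, ring, ndw);
	if (r)
		return r;
	/* ... radeon_ring_write(ring, ...) for each dword ... */
	radeon_ring_unlock_commit(rdev, ring);	/* commit handles HDP now */ ]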
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index adcf3e2f07da..b84f97c8718c 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

 int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
-			      unsigned size, u32 align, u32 domain)
+			      unsigned size, u32 align, u32 domain, u32 flags)
 {
	int i, r;

@@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
	}

	r = radeon_bo_create(rdev, size, align, true,
-			     domain, NULL, &sa_manager->bo);
+			     domain, flags, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 3a13e0d1055c..5adf4207453d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -56,13 +56,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
56 /* Number of tests = 56 /* Number of tests =
57 * (Total GTT - IB pool - writeback page - ring buffers) / test size 57 * (Total GTT - IB pool - writeback page - ring buffers) / test size
58 */ 58 */
59 n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024; 59 n = rdev->mc.gtt_size - rdev->gart_pin_size;
60 for (i = 0; i < RADEON_NUM_RINGS; ++i)
61 n -= rdev->ring[i].ring_size;
62 if (rdev->wb.wb_obj)
63 n -= RADEON_GPU_PAGE_SIZE;
64 if (rdev->ih.ring_obj)
65 n -= rdev->ih.ring_size;
66 n /= size; 60 n /= size;
67 61
68 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); 62 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
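The simplified formula just subtracts whatever is already pinned in the GART instead of enumerating each consumer. As a worked example with made-up sizes:

    /* hypothetical numbers: 512 MiB GTT, 16 MiB pinned, 1 MiB per test */
    u64 gtt_size      = 512ULL << 20;
    u64 gart_pin_size =  16ULL << 20;
    u64 test_size     =   1ULL << 20;
    u64 n = (gtt_size - gart_pin_size) / test_size;  /* n == 496 tests */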
@@ -73,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
73 } 67 }
74 68
75 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 69 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
76 NULL, &vram_obj); 70 0, NULL, &vram_obj);
77 if (r) { 71 if (r) {
78 DRM_ERROR("Failed to create VRAM object\n"); 72 DRM_ERROR("Failed to create VRAM object\n");
79 goto out_cleanup; 73 goto out_cleanup;
@@ -93,7 +87,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
93 struct radeon_fence *fence = NULL; 87 struct radeon_fence *fence = NULL;
94 88
95 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, 89 r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
96 RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i); 90 RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
97 if (r) { 91 if (r) {
98 DRM_ERROR("Failed to create GTT object %d\n", i); 92 DRM_ERROR("Failed to create GTT object %d\n", i);
99 goto out_lclean; 93 goto out_lclean;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f749f2c3bbdb..9db74a96ef61 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -72,8 +72,8 @@ TRACE_EVENT(radeon_vm_bo_update,
72 ), 72 ),
73 73
74 TP_fast_assign( 74 TP_fast_assign(
75 __entry->soffset = bo_va->soffset; 75 __entry->soffset = bo_va->it.start;
76 __entry->eoffset = bo_va->eoffset; 76 __entry->eoffset = bo_va->it.last + 1;
77 __entry->flags = bo_va->flags; 77 __entry->flags = bo_va->flags;
78 ), 78 ),
79 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", 79 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
@@ -104,6 +104,24 @@ TRACE_EVENT(radeon_vm_set_page,
104 __entry->flags, __entry->count) 104 __entry->flags, __entry->count)
105); 105);
106 106
107TRACE_EVENT(radeon_vm_flush,
108 TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
109 TP_ARGS(pd_addr, ring, id),
110 TP_STRUCT__entry(
111 __field(u64, pd_addr)
112 __field(u32, ring)
113 __field(u32, id)
114 ),
115
116 TP_fast_assign(
117 __entry->pd_addr = pd_addr;
118 __entry->ring = ring;
119 __entry->id = id;
120 ),
121 TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
122 __entry->pd_addr, __entry->ring, __entry->id)
123);
124
107DECLARE_EVENT_CLASS(radeon_fence_request, 125DECLARE_EVENT_CLASS(radeon_fence_request,
108 126
109 TP_PROTO(struct drm_device *dev, int ring, u32 seqno), 127 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
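TRACE_EVENT(radeon_vm_flush, ...) auto-generates a trace_radeon_vm_flush() helper; the VM code further down in this diff calls it whenever a page-directory switch forces a flush:

    /* emitted from radeon_vm_flush(); output follows TP_printk, e.g.
     * "pd_addr=0000100000, ring=3, id=1" (values here are examples) */
    trace_radeon_vm_flush(pd_addr, ring, vm->id);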
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c8a8a5144ec1..72afe82a95c9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -521,6 +521,8 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
521 struct ttm_mem_reg *bo_mem) 521 struct ttm_mem_reg *bo_mem)
522{ 522{
523 struct radeon_ttm_tt *gtt = (void*)ttm; 523 struct radeon_ttm_tt *gtt = (void*)ttm;
524 uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
525 RADEON_GART_PAGE_WRITE;
524 int r; 526 int r;
525 527
526 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); 528 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
@@ -528,8 +530,10 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
528 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", 530 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
529 ttm->num_pages, bo_mem, ttm); 531 ttm->num_pages, bo_mem, ttm);
530 } 532 }
531 r = radeon_gart_bind(gtt->rdev, gtt->offset, 533 if (ttm->caching_state == tt_cached)
532 ttm->num_pages, ttm->pages, gtt->ttm.dma_address); 534 flags |= RADEON_GART_PAGE_SNOOP;
535 r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
536 ttm->pages, gtt->ttm.dma_address, flags);
533 if (r) { 537 if (r) {
534 DRM_ERROR("failed to bind %lu pages at 0x%08X\n", 538 DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
535 ttm->num_pages, (unsigned)gtt->offset); 539 ttm->num_pages, (unsigned)gtt->offset);
@@ -726,7 +730,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
726 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 730 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
727 731
728 r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, 732 r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
729 RADEON_GEM_DOMAIN_VRAM, 733 RADEON_GEM_DOMAIN_VRAM, 0,
730 NULL, &rdev->stollen_vga_memory); 734 NULL, &rdev->stollen_vga_memory);
731 if (r) { 735 if (r) {
732 return r; 736 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.c b/drivers/gpu/drm/radeon/radeon_ucode.c
new file mode 100644
index 000000000000..6beec680390c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ucode.c
@@ -0,0 +1,167 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <linux/slab.h>
26#include <linux/module.h>
27#include <drm/drmP.h>
28#include "radeon.h"
29#include "radeon_ucode.h"
30
31static void radeon_ucode_print_common_hdr(const struct common_firmware_header *hdr)
32{
33 DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
34 DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
35 DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
36 DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
37 DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
38 DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
39 DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
40 DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
41 DRM_DEBUG("ucode_array_offset_bytes: %u\n",
42 le32_to_cpu(hdr->ucode_array_offset_bytes));
43 DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
44}
45
46void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
47{
48 uint16_t version_major = le16_to_cpu(hdr->header_version_major);
49 uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
50
51 DRM_DEBUG("MC\n");
52 radeon_ucode_print_common_hdr(hdr);
53
54 if (version_major == 1) {
55 const struct mc_firmware_header_v1_0 *mc_hdr =
56 container_of(hdr, struct mc_firmware_header_v1_0, header);
57
58 DRM_DEBUG("io_debug_size_bytes: %u\n",
59 le32_to_cpu(mc_hdr->io_debug_size_bytes));
60 DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
61 le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
62 } else {
63 DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
64 }
65}
66
67void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
68{
69 uint16_t version_major = le16_to_cpu(hdr->header_version_major);
70 uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
71
72 DRM_DEBUG("SMC\n");
73 radeon_ucode_print_common_hdr(hdr);
74
75 if (version_major == 1) {
76 const struct smc_firmware_header_v1_0 *smc_hdr =
77 container_of(hdr, struct smc_firmware_header_v1_0, header);
78
79 DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
80 } else {
81 DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
82 }
83}
84
85void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
86{
87 uint16_t version_major = le16_to_cpu(hdr->header_version_major);
88 uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
89
90 DRM_DEBUG("GFX\n");
91 radeon_ucode_print_common_hdr(hdr);
92
93 if (version_major == 1) {
94 const struct gfx_firmware_header_v1_0 *gfx_hdr =
95 container_of(hdr, struct gfx_firmware_header_v1_0, header);
96
97 DRM_DEBUG("ucode_feature_version: %u\n",
98 le32_to_cpu(gfx_hdr->ucode_feature_version));
99 DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
100 DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
101 } else {
102 DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
103 }
104}
105
106void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
107{
108 uint16_t version_major = le16_to_cpu(hdr->header_version_major);
109 uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
110
111 DRM_DEBUG("RLC\n");
112 radeon_ucode_print_common_hdr(hdr);
113
114 if (version_major == 1) {
115 const struct rlc_firmware_header_v1_0 *rlc_hdr =
116 container_of(hdr, struct rlc_firmware_header_v1_0, header);
117
118 DRM_DEBUG("ucode_feature_version: %u\n",
119 le32_to_cpu(rlc_hdr->ucode_feature_version));
120 DRM_DEBUG("save_and_restore_offset: %u\n",
121 le32_to_cpu(rlc_hdr->save_and_restore_offset));
122 DRM_DEBUG("clear_state_descriptor_offset: %u\n",
123 le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
124 DRM_DEBUG("avail_scratch_ram_locations: %u\n",
125 le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
126 DRM_DEBUG("master_pkt_description_offset: %u\n",
127 le32_to_cpu(rlc_hdr->master_pkt_description_offset));
128 } else {
129 DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
130 }
131}
132
133void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
134{
135 uint16_t version_major = le16_to_cpu(hdr->header_version_major);
136 uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
137
138 DRM_DEBUG("SDMA\n");
139 radeon_ucode_print_common_hdr(hdr);
140
141 if (version_major == 1) {
142 const struct sdma_firmware_header_v1_0 *sdma_hdr =
143 container_of(hdr, struct sdma_firmware_header_v1_0, header);
144
145 DRM_DEBUG("ucode_feature_version: %u\n",
146 le32_to_cpu(sdma_hdr->ucode_feature_version));
147 DRM_DEBUG("ucode_change_version: %u\n",
148 le32_to_cpu(sdma_hdr->ucode_change_version));
149 DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
150 DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
151 } else {
152 DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
153 version_major, version_minor);
154 }
155}
156
157int radeon_ucode_validate(const struct firmware *fw)
158{
159 const struct common_firmware_header *hdr =
160 (const struct common_firmware_header *)fw->data;
161
162 if (fw->size == le32_to_cpu(hdr->size_bytes))
163 return 0;
164
165 return -EINVAL;
166}
167
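A sketch of how a chip driver is expected to combine these helpers with the stock firmware loader; the SI changes below follow this pattern per blob, and the firmware path here is just an example:

    const struct firmware *fw;
    int err;

    err = request_firmware(&fw, "radeon/tahiti_mc.bin", rdev->dev);
    if (err)
            return err;                      /* blob not installed */
    err = radeon_ucode_validate(fw);         /* size must match header */
    if (err) {
            release_firmware(fw);
            return err;
    }
    radeon_ucode_print_mc_hdr((const struct common_firmware_header *)fw->data);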
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 4e7c3269b183..dc4576e4d8ad 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -153,4 +153,75 @@
153#define HAWAII_SMC_UCODE_START 0x20000 153#define HAWAII_SMC_UCODE_START 0x20000
154#define HAWAII_SMC_UCODE_SIZE 0x1FDEC 154#define HAWAII_SMC_UCODE_SIZE 0x1FDEC
155 155
156struct common_firmware_header {
157 uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
158 uint32_t header_size_bytes; /* size of just the header in bytes */
159 uint16_t header_version_major; /* header version */
160 uint16_t header_version_minor; /* header version */
161 uint16_t ip_version_major; /* IP version */
162 uint16_t ip_version_minor; /* IP version */
163 uint32_t ucode_version;
164 uint32_t ucode_size_bytes; /* size of ucode in bytes */
165 uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
166 uint32_t crc32; /* crc32 checksum of the payload */
167};
168
169/* version_major=1, version_minor=0 */
170struct mc_firmware_header_v1_0 {
171 struct common_firmware_header header;
172 uint32_t io_debug_size_bytes; /* size of debug array in dwords */
173 uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
174};
175
176/* version_major=1, version_minor=0 */
177struct smc_firmware_header_v1_0 {
178 struct common_firmware_header header;
179 uint32_t ucode_start_addr;
180};
181
182/* version_major=1, version_minor=0 */
183struct gfx_firmware_header_v1_0 {
184 struct common_firmware_header header;
185 uint32_t ucode_feature_version;
186 uint32_t jt_offset; /* jt location */
187 uint32_t jt_size; /* size of jt */
188};
189
190/* version_major=1, version_minor=0 */
191struct rlc_firmware_header_v1_0 {
192 struct common_firmware_header header;
193 uint32_t ucode_feature_version;
194 uint32_t save_and_restore_offset;
195 uint32_t clear_state_descriptor_offset;
196 uint32_t avail_scratch_ram_locations;
197 uint32_t master_pkt_description_offset;
198};
199
200/* version_major=1, version_minor=0 */
201struct sdma_firmware_header_v1_0 {
202 struct common_firmware_header header;
203 uint32_t ucode_feature_version;
204 uint32_t ucode_change_version;
205 uint32_t jt_offset; /* jt location */
206 uint32_t jt_size; /* size of jt */
207};
208
209/* header is fixed size */
210union radeon_firmware_header {
211 struct common_firmware_header common;
212 struct mc_firmware_header_v1_0 mc;
213 struct smc_firmware_header_v1_0 smc;
214 struct gfx_firmware_header_v1_0 gfx;
215 struct rlc_firmware_header_v1_0 rlc;
216 struct sdma_firmware_header_v1_0 sdma;
217 uint8_t raw[0x100];
218};
219
220void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
221void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
222void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
223void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
224void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
225int radeon_ucode_validate(const struct firmware *fw);
226
156#endif 227#endif
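All multi-byte header fields are little-endian and every offset is relative to the start of the file, so locating a payload is plain pointer arithmetic. A minimal sketch, assuming the image already passed radeon_ucode_validate():

    const struct common_firmware_header *hdr =
            (const struct common_firmware_header *)fw->data;
    const __le32 *payload = (const __le32 *)
            (fw->data + le32_to_cpu(hdr->ucode_array_offset_bytes));
    unsigned int ndwords = le32_to_cpu(hdr->ucode_size_bytes) / 4;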
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index a4ad270e8261..6bf55ec85b62 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -117,7 +117,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
117 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + 117 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
118 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; 118 RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
119 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, 119 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
120 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); 120 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
121 if (r) { 121 if (r) {
122 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); 122 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
123 return r; 123 return r;
@@ -674,7 +674,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
674 int r, i; 674 int r, i;
675 675
676 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, 676 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
677 RADEON_GEM_DOMAIN_VRAM, NULL, &bo); 677 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
678 if (r) 678 if (r)
679 return r; 679 return r;
680 680
@@ -720,7 +720,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
720 int r, i; 720 int r, i;
721 721
722 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, 722 r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
723 RADEON_GEM_DOMAIN_VRAM, NULL, &bo); 723 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
724 if (r) 724 if (r)
725 return r; 725 return r;
726 726
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index aa21c31a846c..f9b70a43aa52 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
126 size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) + 126 size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
127 RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE; 127 RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
128 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, 128 r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
129 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo); 129 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
130 if (r) { 130 if (r) {
131 dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); 131 dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
132 return r; 132 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 725d3669014f..ccae4d9dc3de 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -238,8 +238,8 @@ void radeon_vm_flush(struct radeon_device *rdev,
238 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); 238 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
239 239
240 /* if we can't remember our last VM flush then flush now! */ 240 /* if we can't remember our last VM flush then flush now! */
241 /* XXX figure out why we have to flush all the time */ 241 if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
242 if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) { 242 trace_radeon_vm_flush(pd_addr, ring, vm->id);
243 vm->pd_gpu_addr = pd_addr; 243 vm->pd_gpu_addr = pd_addr;
244 radeon_ring_vm_flush(rdev, ring, vm); 244 radeon_ring_vm_flush(rdev, ring, vm);
245 } 245 }
@@ -325,17 +325,15 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
325 } 325 }
326 bo_va->vm = vm; 326 bo_va->vm = vm;
327 bo_va->bo = bo; 327 bo_va->bo = bo;
328 bo_va->soffset = 0; 328 bo_va->it.start = 0;
329 bo_va->eoffset = 0; 329 bo_va->it.last = 0;
330 bo_va->flags = 0; 330 bo_va->flags = 0;
331 bo_va->valid = false; 331 bo_va->addr = 0;
332 bo_va->ref_count = 1; 332 bo_va->ref_count = 1;
333 INIT_LIST_HEAD(&bo_va->bo_list); 333 INIT_LIST_HEAD(&bo_va->bo_list);
334 INIT_LIST_HEAD(&bo_va->vm_list);
335 INIT_LIST_HEAD(&bo_va->vm_status); 334 INIT_LIST_HEAD(&bo_va->vm_status);
336 335
337 mutex_lock(&vm->mutex); 336 mutex_lock(&vm->mutex);
338 list_add(&bo_va->vm_list, &vm->va);
339 list_add_tail(&bo_va->bo_list, &bo->va); 337 list_add_tail(&bo_va->bo_list, &bo->va);
340 mutex_unlock(&vm->mutex); 338 mutex_unlock(&vm->mutex);
341 339
@@ -343,6 +341,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
343} 341}
344 342
345/** 343/**
344 * radeon_vm_set_pages - helper to call the right asic function
345 *
346 * @rdev: radeon_device pointer
347 * @ib: indirect buffer to fill with commands
348 * @pe: addr of the page entry
349 * @addr: dst addr to write into pe
350 * @count: number of page entries to update
351 * @incr: increase next addr by incr bytes
352 * @flags: hw access flags
353 *
354 * Traces the parameters and calls the right asic functions
355 * to set up the page table using the DMA engine.
356 */
357static void radeon_vm_set_pages(struct radeon_device *rdev,
358 struct radeon_ib *ib,
359 uint64_t pe,
360 uint64_t addr, unsigned count,
361 uint32_t incr, uint32_t flags)
362{
363 trace_radeon_vm_set_page(pe, addr, count, incr, flags);
364
365 if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
366 uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
367 radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
368
369 } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
370 radeon_asic_vm_write_pages(rdev, ib, pe, addr,
371 count, incr, flags);
372
373 } else {
374 radeon_asic_vm_set_pages(rdev, ib, pe, addr,
375 count, incr, flags);
376 }
377}
378
379/**
346 * radeon_vm_clear_bo - initially clear the page dir/table 380 * radeon_vm_clear_bo - initially clear the page dir/table
347 * 381 *
348 * @rdev: radeon_device pointer 382 * @rdev: radeon_device pointer
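The GART branch of the helper works because system pages already have PTEs in the global GART table: entries are 8 bytes each, and addr >> 12 is the page index. Worked example with a hypothetical address (addr and rdev as in the function above):

    /* a GART-backed mapping at addr = 0x40000 is page index 0x40, so the
     * existing entry sits at table_addr + 0x40 * 8 = table_addr + 0x200
     * and copy_pages clones it instead of rebuilding it */
    uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;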
@@ -376,14 +410,15 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
376 addr = radeon_bo_gpu_offset(bo); 410 addr = radeon_bo_gpu_offset(bo);
377 entries = radeon_bo_size(bo) / 8; 411 entries = radeon_bo_size(bo) / 8;
378 412
379 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, 413 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
380 NULL, entries * 2 + 64);
381 if (r) 414 if (r)
382 goto error; 415 goto error;
383 416
384 ib.length_dw = 0; 417 ib.length_dw = 0;
385 418
386 radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0); 419 radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
420 radeon_asic_vm_pad_ib(rdev, &ib);
421 WARN_ON(ib.length_dw > 64);
387 422
388 r = radeon_ib_schedule(rdev, &ib, NULL); 423 r = radeon_ib_schedule(rdev, &ib, NULL);
389 if (r) 424 if (r)
@@ -419,11 +454,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
419 uint32_t flags) 454 uint32_t flags)
420{ 455{
421 uint64_t size = radeon_bo_size(bo_va->bo); 456 uint64_t size = radeon_bo_size(bo_va->bo);
422 uint64_t eoffset, last_offset = 0;
423 struct radeon_vm *vm = bo_va->vm; 457 struct radeon_vm *vm = bo_va->vm;
424 struct radeon_bo_va *tmp;
425 struct list_head *head;
426 unsigned last_pfn, pt_idx; 458 unsigned last_pfn, pt_idx;
459 uint64_t eoffset;
427 int r; 460 int r;
428 461
429 if (soffset) { 462 if (soffset) {
@@ -445,51 +478,49 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
445 } 478 }
446 479
447 mutex_lock(&vm->mutex); 480 mutex_lock(&vm->mutex);
448 head = &vm->va; 481 if (bo_va->it.start || bo_va->it.last) {
449 last_offset = 0; 482 if (bo_va->addr) {
450 list_for_each_entry(tmp, &vm->va, vm_list) { 483 /* add a clone of the bo_va to clear the old address */
451 if (bo_va == tmp) { 484 struct radeon_bo_va *tmp;
452 /* skip over currently modified bo */ 485 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
453 continue; 486 tmp->it.start = bo_va->it.start;
487 tmp->it.last = bo_va->it.last;
488 tmp->vm = vm;
489 tmp->addr = bo_va->addr;
490 tmp->bo = radeon_bo_ref(bo_va->bo);
491 list_add(&tmp->vm_status, &vm->freed);
454 } 492 }
455 493
456 if (soffset >= last_offset && eoffset <= tmp->soffset) { 494 interval_tree_remove(&bo_va->it, &vm->va);
457 /* bo can be added before this one */ 495 bo_va->it.start = 0;
458 break; 496 bo_va->it.last = 0;
459 }
460 if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
461 /* bo and tmp overlap, invalid offset */
462 dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
463 bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
464 (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
465 mutex_unlock(&vm->mutex);
466 return -EINVAL;
467 }
468 last_offset = tmp->eoffset;
469 head = &tmp->vm_list;
470 } 497 }
471 498
472 if (bo_va->soffset) { 499 soffset /= RADEON_GPU_PAGE_SIZE;
473 /* add a clone of the bo_va to clear the old address */ 500 eoffset /= RADEON_GPU_PAGE_SIZE;
474 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); 501 if (soffset || eoffset) {
475 if (!tmp) { 502 struct interval_tree_node *it;
503 it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
504 if (it) {
505 struct radeon_bo_va *tmp;
506 tmp = container_of(it, struct radeon_bo_va, it);
507 /* bo and tmp overlap, invalid offset */
508 dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
509 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
510 soffset, tmp->bo, tmp->it.start, tmp->it.last);
476 mutex_unlock(&vm->mutex); 511 mutex_unlock(&vm->mutex);
477 return -ENOMEM; 512 return -EINVAL;
478 } 513 }
479 tmp->soffset = bo_va->soffset; 514 bo_va->it.start = soffset;
480 tmp->eoffset = bo_va->eoffset; 515 bo_va->it.last = eoffset - 1;
481 tmp->vm = vm; 516 interval_tree_insert(&bo_va->it, &vm->va);
482 list_add(&tmp->vm_status, &vm->freed);
483 } 517 }
484 518
485 bo_va->soffset = soffset;
486 bo_va->eoffset = eoffset;
487 bo_va->flags = flags; 519 bo_va->flags = flags;
488 bo_va->valid = false; 520 bo_va->addr = 0;
489 list_move(&bo_va->vm_list, head);
490 521
491 soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size; 522 soffset >>= radeon_vm_block_size;
492 eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size; 523 eoffset >>= radeon_vm_block_size;
493 524
494 BUG_ON(eoffset >= radeon_vm_num_pdes(rdev)); 525 BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
495 526
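Replacing the sorted vm_list walk with the kernel's generic interval tree turns the conflict check into an O(log n) lookup. A standalone sketch of the same test, with page-granular offsets as above:

    #include <linux/interval_tree.h>

    /* the tree stores inclusive [start, last] ranges, hence eoffset - 1 */
    static bool radeon_va_is_free(struct rb_root *va, unsigned long soffset,
                                  unsigned long eoffset)
    {
            return interval_tree_iter_first(va, soffset, eoffset - 1) == NULL;
    }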
@@ -510,7 +541,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
510 541
511 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, 542 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
512 RADEON_GPU_PAGE_SIZE, true, 543 RADEON_GPU_PAGE_SIZE, true,
513 RADEON_GEM_DOMAIN_VRAM, NULL, &pt); 544 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
514 if (r) 545 if (r)
515 return r; 546 return r;
516 547
@@ -611,7 +642,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
611 ndw = 64; 642 ndw = 64;
612 643
613 /* assume the worst case */ 644 /* assume the worst case */
614 ndw += vm->max_pde_used * 16; 645 ndw += vm->max_pde_used * 6;
615 646
616 /* update too big for an IB */ 647 /* update too big for an IB */
617 if (ndw > 0xfffff) 648 if (ndw > 0xfffff)
@@ -640,9 +671,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
640 ((last_pt + incr * count) != pt)) { 671 ((last_pt + incr * count) != pt)) {
641 672
642 if (count) { 673 if (count) {
643 radeon_asic_vm_set_page(rdev, &ib, last_pde, 674 radeon_vm_set_pages(rdev, &ib, last_pde,
644 last_pt, count, incr, 675 last_pt, count, incr,
645 R600_PTE_VALID); 676 R600_PTE_VALID);
646 } 677 }
647 678
648 count = 1; 679 count = 1;
@@ -654,12 +685,14 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
654 } 685 }
655 686
656 if (count) 687 if (count)
657 radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count, 688 radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
658 incr, R600_PTE_VALID); 689 incr, R600_PTE_VALID);
659 690
660 if (ib.length_dw != 0) { 691 if (ib.length_dw != 0) {
692 radeon_asic_vm_pad_ib(rdev, &ib);
661 radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); 693 radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
662 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); 694 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
695 WARN_ON(ib.length_dw > ndw);
663 r = radeon_ib_schedule(rdev, &ib, NULL); 696 r = radeon_ib_schedule(rdev, &ib, NULL);
664 if (r) { 697 if (r) {
665 radeon_ib_free(rdev, &ib); 698 radeon_ib_free(rdev, &ib);
@@ -725,30 +758,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
725 (frag_start >= frag_end)) { 758 (frag_start >= frag_end)) {
726 759
727 count = (pe_end - pe_start) / 8; 760 count = (pe_end - pe_start) / 8;
728 radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count, 761 radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
729 RADEON_GPU_PAGE_SIZE, flags); 762 RADEON_GPU_PAGE_SIZE, flags);
730 return; 763 return;
731 } 764 }
732 765
733 /* handle the 4K area at the beginning */ 766 /* handle the 4K area at the beginning */
734 if (pe_start != frag_start) { 767 if (pe_start != frag_start) {
735 count = (frag_start - pe_start) / 8; 768 count = (frag_start - pe_start) / 8;
736 radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count, 769 radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
737 RADEON_GPU_PAGE_SIZE, flags); 770 RADEON_GPU_PAGE_SIZE, flags);
738 addr += RADEON_GPU_PAGE_SIZE * count; 771 addr += RADEON_GPU_PAGE_SIZE * count;
739 } 772 }
740 773
741 /* handle the area in the middle */ 774 /* handle the area in the middle */
742 count = (frag_end - frag_start) / 8; 775 count = (frag_end - frag_start) / 8;
743 radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count, 776 radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
744 RADEON_GPU_PAGE_SIZE, flags | frag_flags); 777 RADEON_GPU_PAGE_SIZE, flags | frag_flags);
745 778
746 /* handle the 4K area at the end */ 779 /* handle the 4K area at the end */
747 if (frag_end != pe_end) { 780 if (frag_end != pe_end) {
748 addr += RADEON_GPU_PAGE_SIZE * count; 781 addr += RADEON_GPU_PAGE_SIZE * count;
749 count = (pe_end - frag_end) / 8; 782 count = (pe_end - frag_end) / 8;
750 radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count, 783 radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
751 RADEON_GPU_PAGE_SIZE, flags); 784 RADEON_GPU_PAGE_SIZE, flags);
752 } 785 }
753} 786}
754 787
@@ -777,9 +810,6 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
777 unsigned count = 0; 810 unsigned count = 0;
778 uint64_t addr; 811 uint64_t addr;
779 812
780 start = start / RADEON_GPU_PAGE_SIZE;
781 end = end / RADEON_GPU_PAGE_SIZE;
782
783 /* walk over the address space and update the page tables */ 813 /* walk over the address space and update the page tables */
784 for (addr = start; addr < end; ) { 814 for (addr = start; addr < end; ) {
785 uint64_t pt_idx = addr >> radeon_vm_block_size; 815 uint64_t pt_idx = addr >> radeon_vm_block_size;
@@ -842,55 +872,73 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
842{ 872{
843 struct radeon_vm *vm = bo_va->vm; 873 struct radeon_vm *vm = bo_va->vm;
844 struct radeon_ib ib; 874 struct radeon_ib ib;
845 unsigned nptes, ndw; 875 unsigned nptes, ncmds, ndw;
846 uint64_t addr; 876 uint64_t addr;
877 uint32_t flags;
847 int r; 878 int r;
848 879
849 880 if (!bo_va->it.start) {
850 if (!bo_va->soffset) {
851 dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n", 881
852 bo_va->bo, vm); 882 bo_va->bo, vm);
853 return -EINVAL; 883 return -EINVAL;
854 } 884 }
855 885
856 if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) 886 list_del_init(&bo_va->vm_status);
857 return 0;
858 887
859 bo_va->flags &= ~RADEON_VM_PAGE_VALID; 888 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
860 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; 889 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
890 bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
861 if (mem) { 891 if (mem) {
862 addr = mem->start << PAGE_SHIFT; 892 addr = mem->start << PAGE_SHIFT;
863 if (mem->mem_type != TTM_PL_SYSTEM) { 893 if (mem->mem_type != TTM_PL_SYSTEM) {
864 bo_va->flags |= RADEON_VM_PAGE_VALID; 894 bo_va->flags |= RADEON_VM_PAGE_VALID;
865 bo_va->valid = true;
866 } 895 }
867 if (mem->mem_type == TTM_PL_TT) { 896 if (mem->mem_type == TTM_PL_TT) {
868 bo_va->flags |= RADEON_VM_PAGE_SYSTEM; 897 bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
898 if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
899 bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
900
869 } else { 901 } else {
870 addr += rdev->vm_manager.vram_base_offset; 902 addr += rdev->vm_manager.vram_base_offset;
871 } 903 }
872 } else { 904 } else {
873 addr = 0; 905 addr = 0;
874 bo_va->valid = false;
875 } 906 }
876 907
908 if (addr == bo_va->addr)
909 return 0;
910 bo_va->addr = addr;
911
877 trace_radeon_vm_bo_update(bo_va); 912 trace_radeon_vm_bo_update(bo_va);
878 913
879 nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE; 914 nptes = bo_va->it.last - bo_va->it.start + 1;
915
 916 /* reserve space for one command for every (1 << BLOCK_SIZE) entries
 917 or 2k dwords (whichever is smaller) */
918 ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
880 919
881 /* padding, etc. */ 920 /* padding, etc. */
882 ndw = 64; 921 ndw = 64;
883 922
884 if (radeon_vm_block_size > 11) 923 flags = radeon_vm_page_flags(bo_va->flags);
885 /* reserve space for one header for every 2k dwords */ 924 if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
886 ndw += (nptes >> 11) * 4; 925 /* only copy commands needed */
887 else 926 ndw += ncmds * 7;
888 /* reserve space for one header for
889 every (1 << BLOCK_SIZE) entries */
890 ndw += (nptes >> radeon_vm_block_size) * 4;
891 927
892 /* reserve space for pte addresses */ 928 } else if (flags & R600_PTE_SYSTEM) {
893 ndw += nptes * 2; 929 /* header for write data commands */
930 ndw += ncmds * 4;
931
932 /* body of write data command */
933 ndw += nptes * 2;
934
935 } else {
936 /* set page commands needed */
937 ndw += ncmds * 10;
938
939 /* two extra commands for begin/end of fragment */
940 ndw += 2 * 10;
941 }
894 942
895 /* update too big for an IB */ 943 /* update too big for an IB */
896 if (ndw > 0xfffff) 944 if (ndw > 0xfffff)
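The dword budget is easiest to see with numbers. Assuming a 9-bit VM block size and a 1024-page (4 MiB) system-memory mapping:

    unsigned nptes = 1024;
    unsigned ncmds = (nptes >> 9) + 1;   /* = 3 write-data commands */
    unsigned ndw   = 64                  /* padding etc.            */
                   + ncmds * 4           /* command headers         */
                   + nptes * 2;          /* one 64-bit PTE per page */
                                         /* ndw == 2124 dwords      */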
@@ -901,8 +949,12 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
901 return r; 949 return r;
902 ib.length_dw = 0; 950 ib.length_dw = 0;
903 951
904 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, 952 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
905 addr, radeon_vm_page_flags(bo_va->flags)); 953 bo_va->it.last + 1, addr,
954 radeon_vm_page_flags(bo_va->flags));
955
956 radeon_asic_vm_pad_ib(rdev, &ib);
957 WARN_ON(ib.length_dw > ndw);
906 958
907 radeon_semaphore_sync_to(ib.semaphore, vm->fence); 959 radeon_semaphore_sync_to(ib.semaphore, vm->fence);
908 r = radeon_ib_schedule(rdev, &ib, NULL); 960 r = radeon_ib_schedule(rdev, &ib, NULL);
@@ -936,8 +988,8 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
936 int r; 988 int r;
937 989
938 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { 990 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
939 list_del(&bo_va->vm_status);
940 r = radeon_vm_bo_update(rdev, bo_va, NULL); 991 r = radeon_vm_bo_update(rdev, bo_va, NULL);
992 radeon_bo_unref(&bo_va->bo);
941 kfree(bo_va); 993 kfree(bo_va);
942 if (r) 994 if (r)
943 return r; 995 return r;
@@ -947,6 +999,31 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
947} 999}
948 1000
949/** 1001/**
1002 * radeon_vm_clear_invalids - clear invalidated BOs in the PT
1003 *
1004 * @rdev: radeon_device pointer
1005 * @vm: requested vm
1006 *
1007 * Make sure all invalidated BOs are cleared in the PT.
1008 * Returns 0 for success.
1009 *
1010 * PTs have to be reserved and mutex must be locked!
1011 */
1012int radeon_vm_clear_invalids(struct radeon_device *rdev,
1013 struct radeon_vm *vm)
1014{
1015 struct radeon_bo_va *bo_va, *tmp;
1016 int r;
1017
1018 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
1019 r = radeon_vm_bo_update(rdev, bo_va, NULL);
1020 if (r)
1021 return r;
1022 }
1023 return 0;
1024}
1025
1026/**
950 * radeon_vm_bo_rmv - remove a bo from a specific vm 1027
951 * 1028 *
952 * @rdev: radeon_device pointer 1029 * @rdev: radeon_device pointer
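Together with the freed-list handling above, a bo_va now moves through a small state machine keyed on bo_va->addr; the summary below only restates the hunks in this file, it is not new driver code:

    /*  addr == 0, unmapped
     *      | radeon_vm_bo_set_addr() + radeon_vm_bo_update()
     *  mapped, addr != 0
     *      |  BO moved   -> vm->invalidated (radeon_vm_bo_invalidate)
     *      |  VA dropped -> clone queued on vm->freed
     *  radeon_vm_clear_invalids()/radeon_vm_clear_freed() then rewrite
     *  the page tables and release the entries. */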
@@ -964,10 +1041,11 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
964 list_del(&bo_va->bo_list); 1041 list_del(&bo_va->bo_list);
965 1042
966 mutex_lock(&vm->mutex); 1043 mutex_lock(&vm->mutex);
967 list_del(&bo_va->vm_list); 1044 interval_tree_remove(&bo_va->it, &vm->va);
1045 list_del(&bo_va->vm_status);
968 1046
969 if (bo_va->soffset) { 1047 if (bo_va->addr) {
970 bo_va->bo = NULL; 1048 bo_va->bo = radeon_bo_ref(bo_va->bo);
971 list_add(&bo_va->vm_status, &vm->freed); 1049 list_add(&bo_va->vm_status, &vm->freed);
972 } else { 1050 } else {
973 kfree(bo_va); 1051 kfree(bo_va);
@@ -991,7 +1069,12 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
991 struct radeon_bo_va *bo_va; 1069 struct radeon_bo_va *bo_va;
992 1070
993 list_for_each_entry(bo_va, &bo->va, bo_list) { 1071 list_for_each_entry(bo_va, &bo->va, bo_list) {
994 bo_va->valid = false; 1072 if (bo_va->addr) {
1073 mutex_lock(&bo_va->vm->mutex);
1074 list_del(&bo_va->vm_status);
1075 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1076 mutex_unlock(&bo_va->vm->mutex);
1077 }
995 } 1078 }
996} 1079}
997 1080
@@ -1016,7 +1099,8 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1016 vm->last_flush = NULL; 1099 vm->last_flush = NULL;
1017 vm->last_id_use = NULL; 1100 vm->last_id_use = NULL;
1018 mutex_init(&vm->mutex); 1101 mutex_init(&vm->mutex);
1019 INIT_LIST_HEAD(&vm->va); 1102 vm->va = RB_ROOT;
1103 INIT_LIST_HEAD(&vm->invalidated);
1020 INIT_LIST_HEAD(&vm->freed); 1104 INIT_LIST_HEAD(&vm->freed);
1021 1105
1022 pd_size = radeon_vm_directory_size(rdev); 1106 pd_size = radeon_vm_directory_size(rdev);
@@ -1031,7 +1115,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1031 } 1115 }
1032 1116
1033 r = radeon_bo_create(rdev, pd_size, align, true, 1117 r = radeon_bo_create(rdev, pd_size, align, true,
1034 RADEON_GEM_DOMAIN_VRAM, NULL, 1118 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
1035 &vm->page_directory); 1119 &vm->page_directory);
1036 if (r) 1120 if (r)
1037 return r; 1121 return r;
@@ -1060,11 +1144,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1060 struct radeon_bo_va *bo_va, *tmp; 1144 struct radeon_bo_va *bo_va, *tmp;
1061 int i, r; 1145 int i, r;
1062 1146
1063 if (!list_empty(&vm->va)) { 1147 if (!RB_EMPTY_ROOT(&vm->va)) {
1064 dev_err(rdev->dev, "still active bo inside vm\n"); 1148 dev_err(rdev->dev, "still active bo inside vm\n");
1065 } 1149 }
1066 list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) { 1150 rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
1067 list_del_init(&bo_va->vm_list); 1151 interval_tree_remove(&bo_va->it, &vm->va);
1068 r = radeon_bo_reserve(bo_va->bo, false); 1152 r = radeon_bo_reserve(bo_va->bo, false);
1069 if (!r) { 1153 if (!r) {
1070 list_del_init(&bo_va->bo_list); 1154 list_del_init(&bo_va->bo_list);
@@ -1072,8 +1156,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1072 kfree(bo_va); 1156 kfree(bo_va);
1073 } 1157 }
1074 } 1158 }
1075 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) 1159 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
1160 radeon_bo_unref(&bo_va->bo);
1076 kfree(bo_va); 1161 kfree(bo_va);
1162 }
1077 1163
1078 for (i = 0; i < radeon_vm_num_pdes(rdev); i++) 1164 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
1079 radeon_bo_unref(&vm->page_tables[i].bo); 1165 radeon_bo_unref(&vm->page_tables[i].bo);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a0f96decece3..6c1fc339d228 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -109,7 +109,6 @@ int rs400_gart_enable(struct radeon_device *rdev)
109 uint32_t size_reg; 109 uint32_t size_reg;
110 uint32_t tmp; 110 uint32_t tmp;
111 111
112 radeon_gart_restore(rdev);
113 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH); 112 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
114 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS; 113 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
115 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp); 114 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -209,17 +208,24 @@ void rs400_gart_fini(struct radeon_device *rdev)
209 radeon_gart_table_ram_free(rdev); 208 radeon_gart_table_ram_free(rdev);
210} 209}
211 210
211#define RS400_PTE_UNSNOOPED (1 << 0)
212#define RS400_PTE_WRITEABLE (1 << 2) 212#define RS400_PTE_WRITEABLE (1 << 2)
213#define RS400_PTE_READABLE (1 << 3) 213#define RS400_PTE_READABLE (1 << 3)
214 214
215void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr) 215void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
216 uint64_t addr, uint32_t flags)
216{ 217{
217 uint32_t entry; 218 uint32_t entry;
218 u32 *gtt = rdev->gart.ptr; 219 u32 *gtt = rdev->gart.ptr;
219 220
220 entry = (lower_32_bits(addr) & PAGE_MASK) | 221 entry = (lower_32_bits(addr) & PAGE_MASK) |
221 ((upper_32_bits(addr) & 0xff) << 4) | 222 ((upper_32_bits(addr) & 0xff) << 4);
222 RS400_PTE_WRITEABLE | RS400_PTE_READABLE; 223 if (flags & RADEON_GART_PAGE_READ)
 224 entry |= RS400_PTE_READABLE;
225 if (flags & RADEON_GART_PAGE_WRITE)
 226 entry |= RS400_PTE_WRITEABLE;
227 if (!(flags & RADEON_GART_PAGE_SNOOP))
228 entry |= RS400_PTE_UNSNOOPED;
223 entry = cpu_to_le32(entry); 229 entry = cpu_to_le32(entry);
224 gtt[i] = entry; 230 gtt[i] = entry;
225} 231}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d1a35cb1c91d..5f6db4629aaa 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -555,7 +555,6 @@ static int rs600_gart_enable(struct radeon_device *rdev)
555 r = radeon_gart_table_vram_pin(rdev); 555 r = radeon_gart_table_vram_pin(rdev);
556 if (r) 556 if (r)
557 return r; 557 return r;
558 radeon_gart_restore(rdev);
559 /* Enable bus master */ 558 /* Enable bus master */
560 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; 559 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
561 WREG32(RADEON_BUS_CNTL, tmp); 560 WREG32(RADEON_BUS_CNTL, tmp);
@@ -626,15 +625,21 @@ static void rs600_gart_fini(struct radeon_device *rdev)
626 radeon_gart_table_vram_free(rdev); 625 radeon_gart_table_vram_free(rdev);
627} 626}
628 627
629void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr) 628void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
629 uint64_t addr, uint32_t flags)
630{ 630{
631 void __iomem *ptr = (void *)rdev->gart.ptr; 631 void __iomem *ptr = (void *)rdev->gart.ptr;
632 632
633 addr = addr & 0xFFFFFFFFFFFFF000ULL; 633 addr = addr & 0xFFFFFFFFFFFFF000ULL;
634 if (addr == rdev->dummy_page.addr) 634 addr |= R600_PTE_SYSTEM;
635 addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED; 635 if (flags & RADEON_GART_PAGE_VALID)
636 else 636 addr |= R600_PTE_VALID;
637 addr |= R600_PTE_GART; 637 if (flags & RADEON_GART_PAGE_READ)
638 addr |= R600_PTE_READABLE;
639 if (flags & RADEON_GART_PAGE_WRITE)
640 addr |= R600_PTE_WRITEABLE;
641 if (flags & RADEON_GART_PAGE_SNOOP)
642 addr |= R600_PTE_SNOOPED;
638 writeq(addr, ptr + (i * 8)); 643 writeq(addr, ptr + (i * 8));
639} 644}
640 645
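With the flag translation in place, a fully permissive cached mapping on an RS600-class GART composes every PTE bit. Worked example with a made-up address, using ptr and i from the function above:

    uint64_t addr = 0x123456000ULL & 0xFFFFFFFFFFFFF000ULL;
    uint64_t pte  = addr | R600_PTE_SYSTEM | R600_PTE_VALID |
                    R600_PTE_READABLE | R600_PTE_WRITEABLE |
                    R600_PTE_SNOOPED;
    writeq(pte, ptr + (i * 8));          /* one 64-bit entry per page */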
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index da8703d8d455..2983f17ea1b3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -900,7 +900,6 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
900 r = radeon_gart_table_vram_pin(rdev); 900 r = radeon_gart_table_vram_pin(rdev);
901 if (r) 901 if (r)
902 return r; 902 return r;
903 radeon_gart_restore(rdev);
904 /* Setup L2 cache */ 903 /* Setup L2 cache */
905 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 904 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
906 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 905 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 9e854fd016da..011779bd2b3d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -42,6 +42,14 @@ MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
42MODULE_FIRMWARE("radeon/TAHITI_mc2.bin"); 42MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
43MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); 43MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
44MODULE_FIRMWARE("radeon/TAHITI_smc.bin"); 44MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
45
46MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
47MODULE_FIRMWARE("radeon/tahiti_me.bin");
48MODULE_FIRMWARE("radeon/tahiti_ce.bin");
49MODULE_FIRMWARE("radeon/tahiti_mc.bin");
50MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
51MODULE_FIRMWARE("radeon/tahiti_smc.bin");
52
45MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); 53MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
46MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); 54MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
47MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); 55MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
@@ -49,6 +57,14 @@ MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
49MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin"); 57MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
50MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); 58MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
51MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin"); 59MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
60
61MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
62MODULE_FIRMWARE("radeon/pitcairn_me.bin");
63MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
64MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
65MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
66MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
67
52MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); 68MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
53MODULE_FIRMWARE("radeon/VERDE_me.bin"); 69MODULE_FIRMWARE("radeon/VERDE_me.bin");
54MODULE_FIRMWARE("radeon/VERDE_ce.bin"); 70MODULE_FIRMWARE("radeon/VERDE_ce.bin");
@@ -56,6 +72,14 @@ MODULE_FIRMWARE("radeon/VERDE_mc.bin");
56MODULE_FIRMWARE("radeon/VERDE_mc2.bin"); 72MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
57MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); 73MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
58MODULE_FIRMWARE("radeon/VERDE_smc.bin"); 74MODULE_FIRMWARE("radeon/VERDE_smc.bin");
75
76MODULE_FIRMWARE("radeon/verde_pfp.bin");
77MODULE_FIRMWARE("radeon/verde_me.bin");
78MODULE_FIRMWARE("radeon/verde_ce.bin");
79MODULE_FIRMWARE("radeon/verde_mc.bin");
80MODULE_FIRMWARE("radeon/verde_rlc.bin");
81MODULE_FIRMWARE("radeon/verde_smc.bin");
82
59MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); 83MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
60MODULE_FIRMWARE("radeon/OLAND_me.bin"); 84MODULE_FIRMWARE("radeon/OLAND_me.bin");
61MODULE_FIRMWARE("radeon/OLAND_ce.bin"); 85MODULE_FIRMWARE("radeon/OLAND_ce.bin");
@@ -63,6 +87,14 @@ MODULE_FIRMWARE("radeon/OLAND_mc.bin");
63MODULE_FIRMWARE("radeon/OLAND_mc2.bin"); 87MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
64MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); 88MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
65MODULE_FIRMWARE("radeon/OLAND_smc.bin"); 89MODULE_FIRMWARE("radeon/OLAND_smc.bin");
90
91MODULE_FIRMWARE("radeon/oland_pfp.bin");
92MODULE_FIRMWARE("radeon/oland_me.bin");
93MODULE_FIRMWARE("radeon/oland_ce.bin");
94MODULE_FIRMWARE("radeon/oland_mc.bin");
95MODULE_FIRMWARE("radeon/oland_rlc.bin");
96MODULE_FIRMWARE("radeon/oland_smc.bin");
97
66MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); 98MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
67MODULE_FIRMWARE("radeon/HAINAN_me.bin"); 99MODULE_FIRMWARE("radeon/HAINAN_me.bin");
68MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); 100MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
@@ -71,6 +103,13 @@ MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
71MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); 103MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
72MODULE_FIRMWARE("radeon/HAINAN_smc.bin"); 104MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
73 105
106MODULE_FIRMWARE("radeon/hainan_pfp.bin");
107MODULE_FIRMWARE("radeon/hainan_me.bin");
108MODULE_FIRMWARE("radeon/hainan_ce.bin");
109MODULE_FIRMWARE("radeon/hainan_mc.bin");
110MODULE_FIRMWARE("radeon/hainan_rlc.bin");
111MODULE_FIRMWARE("radeon/hainan_smc.bin");
112
74static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); 113static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
75static void si_pcie_gen3_enable(struct radeon_device *rdev); 114static void si_pcie_gen3_enable(struct radeon_device *rdev);
76static void si_program_aspm(struct radeon_device *rdev); 115static void si_program_aspm(struct radeon_device *rdev);
@@ -1470,38 +1509,54 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1470/* ucode loading */ 1509/* ucode loading */
1471int si_mc_load_microcode(struct radeon_device *rdev) 1510int si_mc_load_microcode(struct radeon_device *rdev)
1472{ 1511{
1473 const __be32 *fw_data; 1512 const __be32 *fw_data = NULL;
1513 const __le32 *new_fw_data = NULL;
1474 u32 running, blackout = 0; 1514 u32 running, blackout = 0;
1475 u32 *io_mc_regs; 1515 u32 *io_mc_regs = NULL;
1516 const __le32 *new_io_mc_regs = NULL;
1476 int i, regs_size, ucode_size; 1517 int i, regs_size, ucode_size;
1477 1518
1478 if (!rdev->mc_fw) 1519 if (!rdev->mc_fw)
1479 return -EINVAL; 1520 return -EINVAL;
1480 1521
1481 ucode_size = rdev->mc_fw->size / 4; 1522 if (rdev->new_fw) {
1523 const struct mc_firmware_header_v1_0 *hdr =
1524 (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
1525
1526 radeon_ucode_print_mc_hdr(&hdr->header);
1527 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
1528 new_io_mc_regs = (const __le32 *)
1529 (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
1530 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1531 new_fw_data = (const __le32 *)
1532 (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1533 } else {
1534 ucode_size = rdev->mc_fw->size / 4;
1482 1535
1483 switch (rdev->family) { 1536 switch (rdev->family) {
1484 case CHIP_TAHITI: 1537 case CHIP_TAHITI:
1485 io_mc_regs = (u32 *)&tahiti_io_mc_regs; 1538 io_mc_regs = (u32 *)&tahiti_io_mc_regs;
1486 regs_size = TAHITI_IO_MC_REGS_SIZE; 1539 regs_size = TAHITI_IO_MC_REGS_SIZE;
1487 break; 1540 break;
1488 case CHIP_PITCAIRN: 1541 case CHIP_PITCAIRN:
1489 io_mc_regs = (u32 *)&pitcairn_io_mc_regs; 1542 io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
1490 regs_size = TAHITI_IO_MC_REGS_SIZE; 1543 regs_size = TAHITI_IO_MC_REGS_SIZE;
1491 break; 1544 break;
1492 case CHIP_VERDE: 1545 case CHIP_VERDE:
1493 default: 1546 default:
1494 io_mc_regs = (u32 *)&verde_io_mc_regs; 1547 io_mc_regs = (u32 *)&verde_io_mc_regs;
1495 regs_size = TAHITI_IO_MC_REGS_SIZE; 1548 regs_size = TAHITI_IO_MC_REGS_SIZE;
1496 break; 1549 break;
1497 case CHIP_OLAND: 1550 case CHIP_OLAND:
1498 io_mc_regs = (u32 *)&oland_io_mc_regs; 1551 io_mc_regs = (u32 *)&oland_io_mc_regs;
1499 regs_size = TAHITI_IO_MC_REGS_SIZE; 1552 regs_size = TAHITI_IO_MC_REGS_SIZE;
1500 break; 1553 break;
1501 case CHIP_HAINAN: 1554 case CHIP_HAINAN:
1502 io_mc_regs = (u32 *)&hainan_io_mc_regs; 1555 io_mc_regs = (u32 *)&hainan_io_mc_regs;
1503 regs_size = TAHITI_IO_MC_REGS_SIZE; 1556 regs_size = TAHITI_IO_MC_REGS_SIZE;
1504 break; 1557 break;
1558 }
1559 fw_data = (const __be32 *)rdev->mc_fw->data;
1505 } 1560 }
1506 1561
1507 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 1562 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1518,13 +1573,21 @@ int si_mc_load_microcode(struct radeon_device *rdev)
1518 1573
1519 /* load mc io regs */ 1574 /* load mc io regs */
1520 for (i = 0; i < regs_size; i++) { 1575 for (i = 0; i < regs_size; i++) {
1521 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); 1576 if (rdev->new_fw) {
1522 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); 1577 WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
1578 WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
1579 } else {
1580 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
1581 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
1582 }
1523 } 1583 }
1524 /* load the MC ucode */ 1584 /* load the MC ucode */
1525 fw_data = (const __be32 *)rdev->mc_fw->data; 1585 for (i = 0; i < ucode_size; i++) {
1526 for (i = 0; i < ucode_size; i++) 1586 if (rdev->new_fw)
1527 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); 1587 WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
1588 else
1589 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
1590 }
1528 1591
1529 /* put the engine back into the active state */ 1592 /* put the engine back into the active state */
1530 WREG32(MC_SEQ_SUP_CNTL, 0x00000008); 1593 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
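The two container formats differ in byte order, which is the whole reason for the paired accessors: legacy blobs store big-endian words while the new headered files are little-endian throughout. Schematically:

    /* the load loop picks the accessor matching the container format */
    u32 w = rdev->new_fw ? le32_to_cpup(new_fw_data++)
                         : be32_to_cpup(fw_data++);
    WREG32(MC_SEQ_SUP_PGM, w);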
@@ -1553,18 +1616,19 @@ int si_mc_load_microcode(struct radeon_device *rdev)
1553static int si_init_microcode(struct radeon_device *rdev) 1616static int si_init_microcode(struct radeon_device *rdev)
1554{ 1617{
1555 const char *chip_name; 1618 const char *chip_name;
1556 const char *rlc_chip_name; 1619 const char *new_chip_name;
1557 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; 1620 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
1558 size_t smc_req_size, mc2_req_size; 1621 size_t smc_req_size, mc2_req_size;
1559 char fw_name[30]; 1622 char fw_name[30];
1560 int err; 1623 int err;
1624 int new_fw = 0;
1561 1625
1562 DRM_DEBUG("\n"); 1626 DRM_DEBUG("\n");
1563 1627
1564 switch (rdev->family) { 1628 switch (rdev->family) {
1565 case CHIP_TAHITI: 1629 case CHIP_TAHITI:
1566 chip_name = "TAHITI"; 1630 chip_name = "TAHITI";
1567 rlc_chip_name = "TAHITI"; 1631 new_chip_name = "tahiti";
1568 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1632 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1569 me_req_size = SI_PM4_UCODE_SIZE * 4; 1633 me_req_size = SI_PM4_UCODE_SIZE * 4;
1570 ce_req_size = SI_CE_UCODE_SIZE * 4; 1634 ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1575,7 +1639,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1575 break; 1639 break;
1576 case CHIP_PITCAIRN: 1640 case CHIP_PITCAIRN:
1577 chip_name = "PITCAIRN"; 1641 chip_name = "PITCAIRN";
1578 rlc_chip_name = "PITCAIRN"; 1642 new_chip_name = "pitcairn";
1579 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1643 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1580 me_req_size = SI_PM4_UCODE_SIZE * 4; 1644 me_req_size = SI_PM4_UCODE_SIZE * 4;
1581 ce_req_size = SI_CE_UCODE_SIZE * 4; 1645 ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1586,7 +1650,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1586 break; 1650 break;
1587 case CHIP_VERDE: 1651 case CHIP_VERDE:
1588 chip_name = "VERDE"; 1652 chip_name = "VERDE";
1589 rlc_chip_name = "VERDE"; 1653 new_chip_name = "verde";
1590 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1654 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1591 me_req_size = SI_PM4_UCODE_SIZE * 4; 1655 me_req_size = SI_PM4_UCODE_SIZE * 4;
1592 ce_req_size = SI_CE_UCODE_SIZE * 4; 1656 ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1597,7 +1661,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1597 break; 1661 break;
1598 case CHIP_OLAND: 1662 case CHIP_OLAND:
1599 chip_name = "OLAND"; 1663 chip_name = "OLAND";
1600 rlc_chip_name = "OLAND"; 1664 new_chip_name = "oland";
1601 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1665 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1602 me_req_size = SI_PM4_UCODE_SIZE * 4; 1666 me_req_size = SI_PM4_UCODE_SIZE * 4;
1603 ce_req_size = SI_CE_UCODE_SIZE * 4; 1667 ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1607,7 +1671,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1607 break; 1671 break;
1608 case CHIP_HAINAN: 1672 case CHIP_HAINAN:
1609 chip_name = "HAINAN"; 1673 chip_name = "HAINAN";
1610 rlc_chip_name = "HAINAN"; 1674 new_chip_name = "hainan";
1611 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1675 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1612 me_req_size = SI_PM4_UCODE_SIZE * 4; 1676 me_req_size = SI_PM4_UCODE_SIZE * 4;
1613 ce_req_size = SI_CE_UCODE_SIZE * 4; 1677 ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1618,86 +1682,178 @@ static int si_init_microcode(struct radeon_device *rdev)
1618 default: BUG(); 1682 default: BUG();
1619 } 1683 }
1620 1684
1621 DRM_INFO("Loading %s Microcode\n", chip_name); 1685 DRM_INFO("Loading %s Microcode\n", new_chip_name);
1622 1686
1623 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 1687 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
1624 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); 1688 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1625 if (err) 1689 if (err) {
1626 goto out; 1690 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1627 if (rdev->pfp_fw->size != pfp_req_size) { 1691 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1628 printk(KERN_ERR 1692 if (err)
1629 "si_cp: Bogus length %zu in firmware \"%s\"\n", 1693 goto out;
1630 rdev->pfp_fw->size, fw_name); 1694 if (rdev->pfp_fw->size != pfp_req_size) {
1631 err = -EINVAL; 1695 printk(KERN_ERR
1632 goto out; 1696 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1697 rdev->pfp_fw->size, fw_name);
1698 err = -EINVAL;
1699 goto out;
1700 }
1701 } else {
1702 err = radeon_ucode_validate(rdev->pfp_fw);
1703 if (err) {
1704 printk(KERN_ERR
1705 "si_cp: validation failed for firmware \"%s\"\n",
1706 fw_name);
1707 goto out;
1708 } else {
1709 new_fw++;
1710 }
1633 } 1711 }
1634 1712
1635 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 1713 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
1636 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); 1714 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1637 if (err) 1715 if (err) {
1638 goto out; 1716 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1639 if (rdev->me_fw->size != me_req_size) { 1717 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1640 printk(KERN_ERR 1718 if (err)
1641 "si_cp: Bogus length %zu in firmware \"%s\"\n", 1719 goto out;
1642 rdev->me_fw->size, fw_name); 1720 if (rdev->me_fw->size != me_req_size) {
1643 err = -EINVAL; 1721 printk(KERN_ERR
1722 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1723 rdev->me_fw->size, fw_name);
1724 err = -EINVAL;
1725 }
1726 } else {
1727 err = radeon_ucode_validate(rdev->me_fw);
1728 if (err) {
1729 printk(KERN_ERR
1730 "si_cp: validation failed for firmware \"%s\"\n",
1731 fw_name);
1732 goto out;
1733 } else {
1734 new_fw++;
1735 }
1644 } 1736 }
1645 1737
1646 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); 1738 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
1647 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); 1739 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1648 if (err) 1740 if (err) {
1649 goto out; 1741 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
1650 if (rdev->ce_fw->size != ce_req_size) { 1742 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1651 printk(KERN_ERR 1743 if (err)
1652 "si_cp: Bogus length %zu in firmware \"%s\"\n", 1744 goto out;
1653 rdev->ce_fw->size, fw_name); 1745 if (rdev->ce_fw->size != ce_req_size) {
1654 err = -EINVAL; 1746 printk(KERN_ERR
1747 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1748 rdev->ce_fw->size, fw_name);
1749 err = -EINVAL;
1750 }
1751 } else {
1752 err = radeon_ucode_validate(rdev->ce_fw);
1753 if (err) {
1754 printk(KERN_ERR
1755 "si_cp: validation failed for firmware \"%s\"\n",
1756 fw_name);
1757 goto out;
1758 } else {
1759 new_fw++;
1760 }
1655 } 1761 }
1656 1762
1657 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); 1763 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
1658 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); 1764 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1659 if (err) 1765 if (err) {
1660 goto out; 1766 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
1661 if (rdev->rlc_fw->size != rlc_req_size) { 1767 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1662 printk(KERN_ERR 1768 if (err)
1663 "si_rlc: Bogus length %zu in firmware \"%s\"\n", 1769 goto out;
1664 rdev->rlc_fw->size, fw_name); 1770 if (rdev->rlc_fw->size != rlc_req_size) {
1665 err = -EINVAL; 1771 printk(KERN_ERR
1772 "si_rlc: Bogus length %zu in firmware \"%s\"\n",
1773 rdev->rlc_fw->size, fw_name);
1774 err = -EINVAL;
1775 }
1776 } else {
1777 err = radeon_ucode_validate(rdev->rlc_fw);
1778 if (err) {
1779 printk(KERN_ERR
1780 "si_cp: validation failed for firmware \"%s\"\n",
1781 fw_name);
1782 goto out;
1783 } else {
1784 new_fw++;
1785 }
1666 } 1786 }
1667 1787
1668 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); 1788 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
1669 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1789 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1670 if (err) { 1790 if (err) {
1671 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 1791 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
1672 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1792 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1673 if (err) 1793 if (err) {
1794 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1795 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1796 if (err)
1797 goto out;
1798 }
1799 if ((rdev->mc_fw->size != mc_req_size) &&
1800 (rdev->mc_fw->size != mc2_req_size)) {
1801 printk(KERN_ERR
1802 "si_mc: Bogus length %zu in firmware \"%s\"\n",
1803 rdev->mc_fw->size, fw_name);
1804 err = -EINVAL;
1805 }
1806 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1807 } else {
1808 err = radeon_ucode_validate(rdev->mc_fw);
1809 if (err) {
1810 printk(KERN_ERR
1811 "si_cp: validation failed for firmware \"%s\"\n",
1812 fw_name);
1674 goto out; 1813 goto out;
1814 } else {
1815 new_fw++;
1816 }
1675 } 1817 }
1676 if ((rdev->mc_fw->size != mc_req_size) &&
1677 (rdev->mc_fw->size != mc2_req_size)) {
1678 printk(KERN_ERR
1679 "si_mc: Bogus length %zu in firmware \"%s\"\n",
1680 rdev->mc_fw->size, fw_name);
1681 err = -EINVAL;
1682 }
1683 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1684 1818
1685 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 1819 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
1686 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 1820 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1687 if (err) { 1821 if (err) {
1688 printk(KERN_ERR 1822 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1689 "smc: error loading firmware \"%s\"\n", 1823 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1690 fw_name); 1824 if (err) {
1691 release_firmware(rdev->smc_fw); 1825 printk(KERN_ERR
1692 rdev->smc_fw = NULL; 1826 "smc: error loading firmware \"%s\"\n",
1693 err = 0; 1827 fw_name);
1694 } else if (rdev->smc_fw->size != smc_req_size) { 1828 release_firmware(rdev->smc_fw);
1695 printk(KERN_ERR 1829 rdev->smc_fw = NULL;
1696 "si_smc: Bogus length %zu in firmware \"%s\"\n", 1830 err = 0;
1697 rdev->smc_fw->size, fw_name); 1831 } else if (rdev->smc_fw->size != smc_req_size) {
1698 err = -EINVAL; 1832 printk(KERN_ERR
1833 "si_smc: Bogus length %zu in firmware \"%s\"\n",
1834 rdev->smc_fw->size, fw_name);
1835 err = -EINVAL;
1836 }
1837 } else {
1838 err = radeon_ucode_validate(rdev->smc_fw);
1839 if (err) {
1840 printk(KERN_ERR
1841 "si_cp: validation failed for firmware \"%s\"\n",
1842 fw_name);
1843 goto out;
1844 } else {
1845 new_fw++;
1846 }
1699 } 1847 }
1700 1848
1849 if (new_fw == 0) {
1850 rdev->new_fw = false;
1851 } else if (new_fw < 6) {
1852 printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
1853 err = -EINVAL;
1854 } else {
1855 rdev->new_fw = true;
1856 }
1701out: 1857out:
1702 if (err) { 1858 if (err) {
1703 if (err != -EINVAL) 1859 if (err != -EINVAL)
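[ Ed. Each firmware image above follows the same new pattern: try the new lower-case, headered file first, fall back to the legacy upper-case blob, and bump new_fw on success so that a mix of old and new images can be rejected later (the new_fw < 6 check covers the six images: pfp, me, ce, rlc, mc and smc). A minimal sketch of that per-image pattern; fetch_one_ucode() and validate() are hypothetical stand-ins, with validate() playing the role of radeon_ucode_validate(): ]

static int fetch_one_ucode(const struct firmware **fw, struct device *dev,
			   const char *new_name, const char *old_name,
			   size_t legacy_size, int *new_cnt)
{
	int err = request_firmware(fw, new_name, dev);	/* headered image */

	if (err) {
		/* fall back to the legacy raw blob */
		err = request_firmware(fw, old_name, dev);
		if (err)
			return err;
		/* legacy blobs have a fixed, per-ASIC size */
		if ((*fw)->size != legacy_size)
			return -EINVAL;
	} else {
		err = validate(*fw);	/* stands in for radeon_ucode_validate() */
		if (err)
			return err;
		(*new_cnt)++;		/* feeds the all-or-nothing check at the end */
	}
	return 0;
}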
@@ -3282,34 +3438,77 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
3282 3438
3283static int si_cp_load_microcode(struct radeon_device *rdev) 3439static int si_cp_load_microcode(struct radeon_device *rdev)
3284{ 3440{
3285 const __be32 *fw_data;
3286 int i; 3441 int i;
3287 3442
3288 if (!rdev->me_fw || !rdev->pfp_fw) 3443 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
3289 return -EINVAL; 3444 return -EINVAL;
3290 3445
3291 si_cp_enable(rdev, false); 3446 si_cp_enable(rdev, false);
3292 3447
3293 /* PFP */ 3448 if (rdev->new_fw) {
3294 fw_data = (const __be32 *)rdev->pfp_fw->data; 3449 const struct gfx_firmware_header_v1_0 *pfp_hdr =
3295 WREG32(CP_PFP_UCODE_ADDR, 0); 3450 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
3296 for (i = 0; i < SI_PFP_UCODE_SIZE; i++) 3451 const struct gfx_firmware_header_v1_0 *ce_hdr =
3297 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); 3452 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
3298 WREG32(CP_PFP_UCODE_ADDR, 0); 3453 const struct gfx_firmware_header_v1_0 *me_hdr =
3299 3454 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
3300 /* CE */ 3455 const __le32 *fw_data;
3301 fw_data = (const __be32 *)rdev->ce_fw->data; 3456 u32 fw_size;
3302 WREG32(CP_CE_UCODE_ADDR, 0); 3457
3303 for (i = 0; i < SI_CE_UCODE_SIZE; i++) 3458 radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
3304 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); 3459 radeon_ucode_print_gfx_hdr(&ce_hdr->header);
3305 WREG32(CP_CE_UCODE_ADDR, 0); 3460 radeon_ucode_print_gfx_hdr(&me_hdr->header);
3306 3461
3307 /* ME */ 3462 /* PFP */
3308 fw_data = (const __be32 *)rdev->me_fw->data; 3463 fw_data = (const __le32 *)
3309 WREG32(CP_ME_RAM_WADDR, 0); 3464 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3310 for (i = 0; i < SI_PM4_UCODE_SIZE; i++) 3465 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3311 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); 3466 WREG32(CP_PFP_UCODE_ADDR, 0);
3312 WREG32(CP_ME_RAM_WADDR, 0); 3467 for (i = 0; i < fw_size; i++)
3468 WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3469 WREG32(CP_PFP_UCODE_ADDR, 0);
3470
3471 /* CE */
3472 fw_data = (const __le32 *)
3473 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3474 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3475 WREG32(CP_CE_UCODE_ADDR, 0);
3476 for (i = 0; i < fw_size; i++)
3477 WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3478 WREG32(CP_CE_UCODE_ADDR, 0);
3479
3480 /* ME */
3481 fw_data = (const __le32 *)
3482 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3483 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3484 WREG32(CP_ME_RAM_WADDR, 0);
3485 for (i = 0; i < fw_size; i++)
3486 WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3487 WREG32(CP_ME_RAM_WADDR, 0);
3488 } else {
3489 const __be32 *fw_data;
3490
3491 /* PFP */
3492 fw_data = (const __be32 *)rdev->pfp_fw->data;
3493 WREG32(CP_PFP_UCODE_ADDR, 0);
3494 for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
3495 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
3496 WREG32(CP_PFP_UCODE_ADDR, 0);
3497
3498 /* CE */
3499 fw_data = (const __be32 *)rdev->ce_fw->data;
3500 WREG32(CP_CE_UCODE_ADDR, 0);
3501 for (i = 0; i < SI_CE_UCODE_SIZE; i++)
3502 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
3503 WREG32(CP_CE_UCODE_ADDR, 0);
3504
3505 /* ME */
3506 fw_data = (const __be32 *)rdev->me_fw->data;
3507 WREG32(CP_ME_RAM_WADDR, 0);
3508 for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
3509 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
3510 WREG32(CP_ME_RAM_WADDR, 0);
3511 }
3313 3512
3314 WREG32(CP_PFP_UCODE_ADDR, 0); 3513 WREG32(CP_PFP_UCODE_ADDR, 0);
3315 WREG32(CP_CE_UCODE_ADDR, 0); 3514 WREG32(CP_CE_UCODE_ADDR, 0);
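[ Ed. The new path above reads two little-endian fields, ucode_array_offset_bytes and ucode_size_bytes, from a common header instead of relying on the hard-coded SI_*_UCODE_SIZE word counts. A plausible layout of that header, inferred only from the accesses in this hunk; the authoritative definition lives in drivers/gpu/drm/radeon/radeon_ucode.h: ]

struct common_firmware_header {
	__le32 size_bytes;			/* size of the entire image */
	__le32 header_size_bytes;
	__le32 header_version_major;
	__le32 header_version_minor;
	__le32 ip_version_major;
	__le32 ip_version_minor;
	__le32 ucode_version;
	__le32 ucode_size_bytes;		/* payload length read above */
	__le32 ucode_array_offset_bytes;	/* payload offset read above */
	__le32 crc32;
};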
@@ -4048,7 +4247,6 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
4048 r = radeon_gart_table_vram_pin(rdev); 4247 r = radeon_gart_table_vram_pin(rdev);
4049 if (r) 4248 if (r)
4050 return r; 4249 return r;
4051 radeon_gart_restore(rdev);
4052 /* Setup TLB control */ 4250 /* Setup TLB control */
4053 WREG32(MC_VM_MX_L1_TLB_CNTL, 4251 WREG32(MC_VM_MX_L1_TLB_CNTL,
4054 (0xA << 7) | 4252 (0xA << 7) |
@@ -4815,7 +5013,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
4815 5013
4816 /* write new base address */ 5014 /* write new base address */
4817 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5015 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4818 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5016 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4819 WRITE_DATA_DST_SEL(0))); 5017 WRITE_DATA_DST_SEL(0)));
4820 5018
4821 if (vm->id < 8) { 5019 if (vm->id < 8) {
@@ -5592,7 +5790,6 @@ static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5592static int si_rlc_resume(struct radeon_device *rdev) 5790static int si_rlc_resume(struct radeon_device *rdev)
5593{ 5791{
5594 u32 i; 5792 u32 i;
5595 const __be32 *fw_data;
5596 5793
5597 if (!rdev->rlc_fw) 5794 if (!rdev->rlc_fw)
5598 return -EINVAL; 5795 return -EINVAL;
@@ -5615,10 +5812,26 @@ static int si_rlc_resume(struct radeon_device *rdev)
5615 WREG32(RLC_MC_CNTL, 0); 5812 WREG32(RLC_MC_CNTL, 0);
5616 WREG32(RLC_UCODE_CNTL, 0); 5813 WREG32(RLC_UCODE_CNTL, 0);
5617 5814
5618 fw_data = (const __be32 *)rdev->rlc_fw->data; 5815 if (rdev->new_fw) {
5619 for (i = 0; i < SI_RLC_UCODE_SIZE; i++) { 5816 const struct rlc_firmware_header_v1_0 *hdr =
5620 WREG32(RLC_UCODE_ADDR, i); 5817 (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
5621 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 5818 u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
5819 const __le32 *fw_data = (const __le32 *)
5820 (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
5821
5822 radeon_ucode_print_rlc_hdr(&hdr->header);
5823
5824 for (i = 0; i < fw_size; i++) {
5825 WREG32(RLC_UCODE_ADDR, i);
5826 WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
5827 }
5828 } else {
5829 const __be32 *fw_data =
5830 (const __be32 *)rdev->rlc_fw->data;
5831 for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
5832 WREG32(RLC_UCODE_ADDR, i);
5833 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
5834 }
5622 } 5835 }
5623 WREG32(RLC_UCODE_ADDR, 0); 5836 WREG32(RLC_UCODE_ADDR, 0);
5624 5837
@@ -6318,7 +6531,8 @@ restart_ih:
6318 case 16: /* D5 page flip */ 6531 case 16: /* D5 page flip */
6319 case 18: /* D6 page flip */ 6532 case 18: /* D6 page flip */
6320 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); 6533 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
6321 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); 6534 if (radeon_use_pflipirq > 0)
6535 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
6322 break; 6536 break;
6323 case 42: /* HPD hotplug */ 6537 case 42: /* HPD hotplug */
6324 switch (src_data) { 6538 switch (src_data) {
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index e24c94b6d14d..716505129450 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -56,7 +56,41 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
56} 56}
57 57
58/** 58/**
59 * si_dma_vm_set_page - update the page tables using the DMA 59 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
60 *
61 * @rdev: radeon_device pointer
62 * @ib: indirect buffer to fill with commands
63 * @pe: addr of the page entry
64 * @src: src addr where to copy from
65 * @count: number of page entries to update
66 *
67 * Update PTEs by copying them from the GART using the DMA (SI).
68 */
69void si_dma_vm_copy_pages(struct radeon_device *rdev,
70 struct radeon_ib *ib,
71 uint64_t pe, uint64_t src,
72 unsigned count)
73{
74 while (count) {
75 unsigned bytes = count * 8;
76 if (bytes > 0xFFFF8)
77 bytes = 0xFFFF8;
78
79 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
80 1, 0, 0, bytes);
81 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
82 ib->ptr[ib->length_dw++] = lower_32_bits(src);
83 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
84 ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
85
86 pe += bytes;
87 src += bytes;
88 count -= bytes / 8;
89 }
90}
91
92/**
93 * si_dma_vm_write_pages - update PTEs by writing them manually
60 * 94 *
61 * @rdev: radeon_device pointer 95 * @rdev: radeon_device pointer
62 * @ib: indirect buffer to fill with commands 96 * @ib: indirect buffer to fill with commands
@@ -66,83 +100,89 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
66 * @incr: increase next addr by incr bytes 100 * @incr: increase next addr by incr bytes
67 * @flags: access flags 101 * @flags: access flags
68 * 102 *
69 * Update the page tables using the DMA (SI). 103 * Update PTEs by writing them manually using the DMA (SI).
70 */ 104 */
71void si_dma_vm_set_page(struct radeon_device *rdev, 105void si_dma_vm_write_pages(struct radeon_device *rdev,
72 struct radeon_ib *ib, 106 struct radeon_ib *ib,
73 uint64_t pe, 107 uint64_t pe,
74 uint64_t addr, unsigned count, 108 uint64_t addr, unsigned count,
75 uint32_t incr, uint32_t flags) 109 uint32_t incr, uint32_t flags)
76{ 110{
77 uint64_t value; 111 uint64_t value;
78 unsigned ndw; 112 unsigned ndw;
79 113
80 trace_radeon_vm_set_page(pe, addr, count, incr, flags); 114 while (count) {
81 115 ndw = count * 2;
82 if (flags == R600_PTE_GART) { 116 if (ndw > 0xFFFFE)
83 uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; 117 ndw = 0xFFFFE;
84 while (count) { 118
85 unsigned bytes = count * 8; 119 /* for non-physically contiguous pages (system) */
86 if (bytes > 0xFFFF8) 120 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
87 bytes = 0xFFFF8; 121 ib->ptr[ib->length_dw++] = pe;
88 122 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
89 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, 123 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
90 1, 0, 0, bytes); 124 if (flags & R600_PTE_SYSTEM) {
91 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
92 ib->ptr[ib->length_dw++] = lower_32_bits(src);
93 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
94 ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
95
96 pe += bytes;
97 src += bytes;
98 count -= bytes / 8;
99 }
100 } else if (flags & R600_PTE_SYSTEM) {
101 while (count) {
102 ndw = count * 2;
103 if (ndw > 0xFFFFE)
104 ndw = 0xFFFFE;
105
106 /* for non-physically contiguous pages (system) */
107 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
108 ib->ptr[ib->length_dw++] = pe;
109 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
110 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
111 value = radeon_vm_map_gart(rdev, addr); 125 value = radeon_vm_map_gart(rdev, addr);
112 value &= 0xFFFFFFFFFFFFF000ULL; 126 value &= 0xFFFFFFFFFFFFF000ULL;
113 addr += incr; 127 } else if (flags & R600_PTE_VALID) {
114 value |= flags;
115 ib->ptr[ib->length_dw++] = value;
116 ib->ptr[ib->length_dw++] = upper_32_bits(value);
117 }
118 }
119 } else {
120 while (count) {
121 ndw = count * 2;
122 if (ndw > 0xFFFFE)
123 ndw = 0xFFFFE;
124
125 if (flags & R600_PTE_VALID)
126 value = addr; 128 value = addr;
127 else 129 } else {
128 value = 0; 130 value = 0;
129 /* for physically contiguous pages (vram) */ 131 }
130 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); 132 addr += incr;
131 ib->ptr[ib->length_dw++] = pe; /* dst addr */ 133 value |= flags;
132 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; 134 ib->ptr[ib->length_dw++] = value;
133 ib->ptr[ib->length_dw++] = flags; /* mask */
134 ib->ptr[ib->length_dw++] = 0;
135 ib->ptr[ib->length_dw++] = value; /* value */
136 ib->ptr[ib->length_dw++] = upper_32_bits(value); 135 ib->ptr[ib->length_dw++] = upper_32_bits(value);
137 ib->ptr[ib->length_dw++] = incr; /* increment size */
138 ib->ptr[ib->length_dw++] = 0;
139 pe += ndw * 4;
140 addr += (ndw / 2) * incr;
141 count -= ndw / 2;
142 } 136 }
143 } 137 }
144 while (ib->length_dw & 0x7) 138}
145 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); 139
140/**
141 * si_dma_vm_set_pages - update the page tables using the DMA
142 *
143 * @rdev: radeon_device pointer
144 * @ib: indirect buffer to fill with commands
145 * @pe: addr of the page entry
146 * @addr: dst addr to write into pe
147 * @count: number of page entries to update
148 * @incr: increase next addr by incr bytes
149 * @flags: access flags
150 *
151 * Update the page tables using the DMA (SI).
152 */
153void si_dma_vm_set_pages(struct radeon_device *rdev,
154 struct radeon_ib *ib,
155 uint64_t pe,
156 uint64_t addr, unsigned count,
157 uint32_t incr, uint32_t flags)
158{
159 uint64_t value;
160 unsigned ndw;
161
162 while (count) {
163 ndw = count * 2;
164 if (ndw > 0xFFFFE)
165 ndw = 0xFFFFE;
166
167 if (flags & R600_PTE_VALID)
168 value = addr;
169 else
170 value = 0;
171
172 /* for physically contiguous pages (vram) */
173 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
174 ib->ptr[ib->length_dw++] = pe; /* dst addr */
175 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
176 ib->ptr[ib->length_dw++] = flags; /* mask */
177 ib->ptr[ib->length_dw++] = 0;
178 ib->ptr[ib->length_dw++] = value; /* value */
179 ib->ptr[ib->length_dw++] = upper_32_bits(value);
180 ib->ptr[ib->length_dw++] = incr; /* increment size */
181 ib->ptr[ib->length_dw++] = 0;
182 pe += ndw * 4;
183 addr += (ndw / 2) * incr;
184 count -= ndw / 2;
185 }
146} 186}
147 187
148void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 188void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
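[ Ed. The refactor above splits si_dma_vm_set_page() into three single-purpose helpers; the chunking limits are unchanged. Worked out: a COPY packet carries at most 0xFFFF8 bytes, i.e. 0xFFFF8 / 8 = 131071 eight-byte PTEs per packet, and a WRITE packet at most 0xFFFFE dwords, i.e. 0xFFFFE / 2 = 524287 PTEs per packet, which is why each loop keeps re-issuing packet headers until count is exhausted. ]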
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 58918868f894..70e61ffeace2 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3812,6 +3812,27 @@ void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
3812 voltage_table->count = max_voltage_steps; 3812 voltage_table->count = max_voltage_steps;
3813} 3813}
3814 3814
3815static int si_get_svi2_voltage_table(struct radeon_device *rdev,
3816 struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
3817 struct atom_voltage_table *voltage_table)
3818{
3819 u32 i;
3820
3821 if (voltage_dependency_table == NULL)
3822 return -EINVAL;
3823
3824 voltage_table->mask_low = 0;
3825 voltage_table->phase_delay = 0;
3826
3827 voltage_table->count = voltage_dependency_table->count;
3828 for (i = 0; i < voltage_table->count; i++) {
3829 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
3830 voltage_table->entries[i].smio_low = 0;
3831 }
3832
3833 return 0;
3834}
3835
3815static int si_construct_voltage_tables(struct radeon_device *rdev) 3836static int si_construct_voltage_tables(struct radeon_device *rdev)
3816{ 3837{
3817 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 3838 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
@@ -3819,15 +3840,25 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
3819 struct si_power_info *si_pi = si_get_pi(rdev); 3840 struct si_power_info *si_pi = si_get_pi(rdev);
3820 int ret; 3841 int ret;
3821 3842
3822 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC, 3843 if (pi->voltage_control) {
3823 VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table); 3844 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
3824 if (ret) 3845 VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
3825 return ret; 3846 if (ret)
3847 return ret;
3826 3848
3827 if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) 3849 if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
3828 si_trim_voltage_table_to_fit_state_table(rdev, 3850 si_trim_voltage_table_to_fit_state_table(rdev,
3829 SISLANDS_MAX_NO_VREG_STEPS, 3851 SISLANDS_MAX_NO_VREG_STEPS,
3830 &eg_pi->vddc_voltage_table); 3852 &eg_pi->vddc_voltage_table);
3853 } else if (si_pi->voltage_control_svi2) {
3854 ret = si_get_svi2_voltage_table(rdev,
3855 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3856 &eg_pi->vddc_voltage_table);
3857 if (ret)
3858 return ret;
3859 } else {
3860 return -EINVAL;
3861 }
3831 3862
3832 if (eg_pi->vddci_control) { 3863 if (eg_pi->vddci_control) {
3833 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI, 3864 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
@@ -3840,6 +3871,13 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
3840 SISLANDS_MAX_NO_VREG_STEPS, 3871 SISLANDS_MAX_NO_VREG_STEPS,
3841 &eg_pi->vddci_voltage_table); 3872 &eg_pi->vddci_voltage_table);
3842 } 3873 }
3874 if (si_pi->vddci_control_svi2) {
3875 ret = si_get_svi2_voltage_table(rdev,
3876 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3877 &eg_pi->vddci_voltage_table);
3878 if (ret)
3879 return ret;
3880 }
3843 3881
3844 if (pi->mvdd_control) { 3882 if (pi->mvdd_control) {
3845 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC, 3883 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
@@ -3893,46 +3931,55 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
3893 struct si_power_info *si_pi = si_get_pi(rdev); 3931 struct si_power_info *si_pi = si_get_pi(rdev);
3894 u8 i; 3932 u8 i;
3895 3933
3896 if (eg_pi->vddc_voltage_table.count) { 3934 if (si_pi->voltage_control_svi2) {
3897 si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table); 3935 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
3898 table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = 3936 si_pi->svc_gpio_id);
3899 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); 3937 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
3900 3938 si_pi->svd_gpio_id);
3901 for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) { 3939 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
3902 if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { 3940 2);
3903 table->maxVDDCIndexInPPTable = i; 3941 } else {
3904 break; 3942 if (eg_pi->vddc_voltage_table.count) {
3943 si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
3944 table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
3945 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
3946
3947 for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
3948 if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
3949 table->maxVDDCIndexInPPTable = i;
3950 break;
3951 }
3905 } 3952 }
3906 } 3953 }
3907 }
3908 3954
3909 if (eg_pi->vddci_voltage_table.count) { 3955 if (eg_pi->vddci_voltage_table.count) {
3910 si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table); 3956 si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
3911 3957
3912 table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = 3958 table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
3913 cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); 3959 cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
3914 } 3960 }
3915 3961
3916 3962
3917 if (si_pi->mvdd_voltage_table.count) { 3963 if (si_pi->mvdd_voltage_table.count) {
3918 si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table); 3964 si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
3919 3965
3920 table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = 3966 table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
3921 cpu_to_be32(si_pi->mvdd_voltage_table.mask_low); 3967 cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
3922 } 3968 }
3923 3969
3924 if (si_pi->vddc_phase_shed_control) { 3970 if (si_pi->vddc_phase_shed_control) {
3925 if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table, 3971 if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
3926 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) { 3972 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
3927 si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table); 3973 si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
3928 3974
3929 table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = 3975 table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
3930 cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); 3976 cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
3931 3977
3932 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, 3978 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
3933 (u32)si_pi->vddc_phase_shed_table.phase_delay); 3979 (u32)si_pi->vddc_phase_shed_table.phase_delay);
3934 } else { 3980 } else {
3935 si_pi->vddc_phase_shed_control = false; 3981 si_pi->vddc_phase_shed_control = false;
3982 }
3936 } 3983 }
3937 } 3984 }
3938 3985
@@ -5798,16 +5845,17 @@ int si_dpm_enable(struct radeon_device *rdev)
5798{ 5845{
5799 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 5846 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
5800 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 5847 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
5848 struct si_power_info *si_pi = si_get_pi(rdev);
5801 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 5849 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5802 int ret; 5850 int ret;
5803 5851
5804 if (si_is_smc_running(rdev)) 5852 if (si_is_smc_running(rdev))
5805 return -EINVAL; 5853 return -EINVAL;
5806 if (pi->voltage_control) 5854 if (pi->voltage_control || si_pi->voltage_control_svi2)
5807 si_enable_voltage_control(rdev, true); 5855 si_enable_voltage_control(rdev, true);
5808 if (pi->mvdd_control) 5856 if (pi->mvdd_control)
5809 si_get_mvdd_configuration(rdev); 5857 si_get_mvdd_configuration(rdev);
5810 if (pi->voltage_control) { 5858 if (pi->voltage_control || si_pi->voltage_control_svi2) {
5811 ret = si_construct_voltage_tables(rdev); 5859 ret = si_construct_voltage_tables(rdev);
5812 if (ret) { 5860 if (ret) {
5813 DRM_ERROR("si_construct_voltage_tables failed\n"); 5861 DRM_ERROR("si_construct_voltage_tables failed\n");
@@ -6406,16 +6454,32 @@ int si_dpm_init(struct radeon_device *rdev)
6406 ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold; 6454 ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
6407 6455
6408 pi->voltage_control = 6456 pi->voltage_control =
6409 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT); 6457 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
6458 VOLTAGE_OBJ_GPIO_LUT);
6459 if (!pi->voltage_control) {
6460 si_pi->voltage_control_svi2 =
6461 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
6462 VOLTAGE_OBJ_SVID2);
6463 if (si_pi->voltage_control_svi2)
6464 radeon_atom_get_svi2_info(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
6465 &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
6466 }
6410 6467
6411 pi->mvdd_control = 6468 pi->mvdd_control =
6412 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT); 6469 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
6470 VOLTAGE_OBJ_GPIO_LUT);
6413 6471
6414 eg_pi->vddci_control = 6472 eg_pi->vddci_control =
6415 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT); 6473 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
6474 VOLTAGE_OBJ_GPIO_LUT);
6475 if (!eg_pi->vddci_control)
6476 si_pi->vddci_control_svi2 =
6477 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
6478 VOLTAGE_OBJ_SVID2);
6416 6479
6417 si_pi->vddc_phase_shed_control = 6480 si_pi->vddc_phase_shed_control =
6418 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); 6481 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
6482 VOLTAGE_OBJ_PHASE_LUT);
6419 6483
6420 rv770_get_engine_memory_ss(rdev); 6484 rv770_get_engine_memory_ss(rdev);
6421 6485
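[ Ed. Condensed, the detection logic added above still prefers GPIO-LUT voltage control and probes SVI2 only as a fallback; for VDDC it also fetches the SVD/SVC GPIO ids that si_populate_smc_voltage_tables() later writes into the SMC soft registers. Illustrative flow only; is_gpio_lut() and is_svid2() stand in for radeon_atom_is_voltage_gpio() with VOLTAGE_OBJ_GPIO_LUT and VOLTAGE_OBJ_SVID2 respectively: ]

	pi->voltage_control = is_gpio_lut(VDDC);
	if (!pi->voltage_control) {
		si_pi->voltage_control_svi2 = is_svid2(VDDC);
		if (si_pi->voltage_control_svi2)
			/* SVD/SVC GPIO ids, programmed into SMC soft registers later */
			radeon_atom_get_svi2_info(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						  &si_pi->svd_gpio_id,
						  &si_pi->svc_gpio_id);
	}

	eg_pi->vddci_control = is_gpio_lut(VDDCI);
	if (!eg_pi->vddci_control)
		si_pi->vddci_control_svi2 = is_svid2(VDDCI);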
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index 4ce5032cdf49..8b5c06a0832d 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -170,6 +170,8 @@ struct si_power_info {
170 bool vddc_phase_shed_control; 170 bool vddc_phase_shed_control;
171 bool pspp_notify_required; 171 bool pspp_notify_required;
172 bool sclk_deep_sleep_above_low; 172 bool sclk_deep_sleep_above_low;
173 bool voltage_control_svi2;
174 bool vddci_control_svi2;
173 /* smc offsets */ 175 /* smc offsets */
174 u32 sram_end; 176 u32 sram_end;
175 u32 state_table_start; 177 u32 state_table_start;
@@ -192,6 +194,9 @@ struct si_power_info {
192 SMC_SIslands_MCRegisters smc_mc_reg_table; 194 SMC_SIslands_MCRegisters smc_mc_reg_table;
193 SISLANDS_SMC_STATETABLE smc_statetable; 195 SISLANDS_SMC_STATETABLE smc_statetable;
194 PP_SIslands_PAPMParameters papm_parm; 196 PP_SIslands_PAPMParameters papm_parm;
197 /* SVI2 */
198 u8 svd_gpio_id;
199 u8 svc_gpio_id;
195}; 200};
196 201
197#define SISLANDS_INITIAL_STATE_ARB_INDEX 0 202#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index e80efcf0c230..73dbc79c959d 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -219,36 +219,48 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
219 if (!rdev->smc_fw) 219 if (!rdev->smc_fw)
220 return -EINVAL; 220 return -EINVAL;
221 221
222 switch (rdev->family) { 222 if (rdev->new_fw) {
223 case CHIP_TAHITI: 223 const struct smc_firmware_header_v1_0 *hdr =
224 ucode_start_address = TAHITI_SMC_UCODE_START; 224 (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
225 ucode_size = TAHITI_SMC_UCODE_SIZE; 225
226 break; 226 radeon_ucode_print_smc_hdr(&hdr->header);
227 case CHIP_PITCAIRN: 227
228 ucode_start_address = PITCAIRN_SMC_UCODE_START; 228 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
229 ucode_size = PITCAIRN_SMC_UCODE_SIZE; 229 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
230 break; 230 src = (const u8 *)
231 case CHIP_VERDE: 231 (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
232 ucode_start_address = VERDE_SMC_UCODE_START; 232 } else {
233 ucode_size = VERDE_SMC_UCODE_SIZE; 233 switch (rdev->family) {
234 break; 234 case CHIP_TAHITI:
235 case CHIP_OLAND: 235 ucode_start_address = TAHITI_SMC_UCODE_START;
236 ucode_start_address = OLAND_SMC_UCODE_START; 236 ucode_size = TAHITI_SMC_UCODE_SIZE;
237 ucode_size = OLAND_SMC_UCODE_SIZE; 237 break;
238 break; 238 case CHIP_PITCAIRN:
239 case CHIP_HAINAN: 239 ucode_start_address = PITCAIRN_SMC_UCODE_START;
240 ucode_start_address = HAINAN_SMC_UCODE_START; 240 ucode_size = PITCAIRN_SMC_UCODE_SIZE;
241 ucode_size = HAINAN_SMC_UCODE_SIZE; 241 break;
242 break; 242 case CHIP_VERDE:
243 default: 243 ucode_start_address = VERDE_SMC_UCODE_START;
244 DRM_ERROR("unknown asic in smc ucode loader\n"); 244 ucode_size = VERDE_SMC_UCODE_SIZE;
245 BUG(); 245 break;
246 case CHIP_OLAND:
247 ucode_start_address = OLAND_SMC_UCODE_START;
248 ucode_size = OLAND_SMC_UCODE_SIZE;
249 break;
250 case CHIP_HAINAN:
251 ucode_start_address = HAINAN_SMC_UCODE_START;
252 ucode_size = HAINAN_SMC_UCODE_SIZE;
253 break;
254 default:
255 DRM_ERROR("unknown asic in smc ucode loader\n");
256 BUG();
257 }
258 src = (const u8 *)rdev->smc_fw->data;
246 } 259 }
247 260
248 if (ucode_size & 3) 261 if (ucode_size & 3)
249 return -EINVAL; 262 return -EINVAL;
250 263
251 src = (const u8 *)rdev->smc_fw->data;
252 spin_lock_irqsave(&rdev->smc_idx_lock, flags); 264 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
253 WREG32(SMC_IND_INDEX_0, ucode_start_address); 265 WREG32(SMC_IND_INDEX_0, ucode_start_address);
254 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 266 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 10e945a49479..623a0b1e2d9d 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -241,6 +241,9 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
241#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4 241#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
242#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC 242#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
243#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100 243#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
244#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
245#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
246#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
244 247
245#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 248#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
246#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32 249#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 792fd1d20e86..fda64b7b73e8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -187,7 +187,7 @@ static struct drm_driver rcar_du_driver = {
187 * Power management 187 * Power management
188 */ 188 */
189 189
190#if CONFIG_PM_SLEEP 190#ifdef CONFIG_PM_SLEEP
191static int rcar_du_pm_suspend(struct device *dev) 191static int rcar_du_pm_suspend(struct device *dev)
192{ 192{
193 struct rcar_du_device *rcdu = dev_get_drvdata(dev); 193 struct rcar_du_device *rcdu = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index a87edfac111f..76026104d000 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -135,7 +135,9 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
135{ 135{
136 struct rcar_du_device *rcdu = dev->dev_private; 136 struct rcar_du_device *rcdu = dev->dev_private;
137 const struct rcar_du_format_info *format; 137 const struct rcar_du_format_info *format;
138 unsigned int max_pitch;
138 unsigned int align; 139 unsigned int align;
140 unsigned int bpp;
139 141
140 format = rcar_du_format_info(mode_cmd->pixel_format); 142 format = rcar_du_format_info(mode_cmd->pixel_format);
141 if (format == NULL) { 143 if (format == NULL) {
@@ -144,13 +146,20 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
144 return ERR_PTR(-EINVAL); 146 return ERR_PTR(-EINVAL);
145 } 147 }
146 148
149 /*
150 * The pitch and alignment constraints are expressed in pixels on the
151 * hardware side and in bytes in the DRM API.
152 */
153 bpp = format->planes == 2 ? 1 : format->bpp / 8;
154 max_pitch = 4096 * bpp;
155
147 if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B)) 156 if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
148 align = 128; 157 align = 128;
149 else 158 else
150 align = 16 * format->bpp / 8; 159 align = 16 * bpp;
151 160
152 if (mode_cmd->pitches[0] & (align - 1) || 161 if (mode_cmd->pitches[0] & (align - 1) ||
153 mode_cmd->pitches[0] >= 8192) { 162 mode_cmd->pitches[0] >= max_pitch) {
154 dev_dbg(dev->dev, "invalid pitch value %u\n", 163 dev_dbg(dev->dev, "invalid pitch value %u\n",
155 mode_cmd->pitches[0]); 164 mode_cmd->pitches[0]);
156 return ERR_PTR(-EINVAL); 165 return ERR_PTR(-EINVAL);
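[ Ed. Worked example of the new limit: the hardware constraint is 4096 pixels, while the DRM API carries pitches in bytes. For XRGB8888 (one plane, 32 bpp), bpp = 4 and max_pitch = 16384 bytes; for a two-plane format such as NV12, the check applies to the luma plane at 1 byte per pixel, so max_pitch = 4096 bytes. The old fixed ">= 8192" test rejected valid 32-bpp pitches and accepted invalid two-plane ones. ]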
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 289048d1c7b2..21426bd234eb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -64,7 +64,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
64 64
65static void rcar_du_lvds_connector_destroy(struct drm_connector *connector) 65static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
66{ 66{
67 drm_sysfs_connector_remove(connector); 67 drm_connector_unregister(connector);
68 drm_connector_cleanup(connector); 68 drm_connector_cleanup(connector);
69} 69}
70 70
@@ -105,7 +105,7 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
105 return ret; 105 return ret;
106 106
107 drm_connector_helper_add(connector, &connector_helper_funcs); 107 drm_connector_helper_add(connector, &connector_helper_funcs);
108 ret = drm_sysfs_connector_add(connector); 108 ret = drm_connector_register(connector);
109 if (ret < 0) 109 if (ret < 0)
110 return ret; 110 return ret;
111 111
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index ccfe64c7188f..8af3944d31b9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -32,7 +32,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
32 32
33static void rcar_du_vga_connector_destroy(struct drm_connector *connector) 33static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
34{ 34{
35 drm_sysfs_connector_remove(connector); 35 drm_connector_unregister(connector);
36 drm_connector_cleanup(connector); 36 drm_connector_cleanup(connector);
37} 37}
38 38
@@ -70,7 +70,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
70 return ret; 70 return ret;
71 71
72 drm_connector_helper_add(connector, &connector_helper_funcs); 72 drm_connector_helper_add(connector, &connector_helper_funcs);
73 ret = drm_sysfs_connector_add(connector); 73 ret = drm_connector_register(connector);
74 if (ret < 0) 74 if (ret < 0)
75 return ret; 75 return ret;
76 76
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index faf176b2daf9..47875de89010 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -692,7 +692,7 @@ static void shmob_drm_connector_destroy(struct drm_connector *connector)
692 struct shmob_drm_connector *scon = to_shmob_connector(connector); 692 struct shmob_drm_connector *scon = to_shmob_connector(connector);
693 693
694 shmob_drm_backlight_exit(scon); 694 shmob_drm_backlight_exit(scon);
695 drm_sysfs_connector_remove(connector); 695 drm_connector_unregister(connector);
696 drm_connector_cleanup(connector); 696 drm_connector_cleanup(connector);
697} 697}
698 698
@@ -726,7 +726,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
726 return ret; 726 return ret;
727 727
728 drm_connector_helper_add(connector, &connector_helper_funcs); 728 drm_connector_helper_add(connector, &connector_helper_funcs);
729 ret = drm_sysfs_connector_add(connector); 729 ret = drm_connector_register(connector);
730 if (ret < 0) 730 if (ret < 0)
731 goto err_cleanup; 731 goto err_cleanup;
732 732
@@ -749,7 +749,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
749err_backlight: 749err_backlight:
750 shmob_drm_backlight_exit(&sdev->connector); 750 shmob_drm_backlight_exit(&sdev->connector);
751err_sysfs: 751err_sysfs:
752 drm_sysfs_connector_remove(connector); 752 drm_connector_unregister(connector);
753err_cleanup: 753err_cleanup:
754 drm_connector_cleanup(connector); 754 drm_connector_cleanup(connector);
755 return ret; 755 return ret;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 82c84c7fd4f6..ff4ba483b602 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -297,7 +297,7 @@ static struct drm_driver shmob_drm_driver = {
297 * Power management 297 * Power management
298 */ 298 */
299 299
300#if CONFIG_PM_SLEEP 300#ifdef CONFIG_PM_SLEEP
301static int shmob_drm_pm_suspend(struct device *dev) 301static int shmob_drm_pm_suspend(struct device *dev)
302{ 302{
303 struct shmob_drm_device *sdev = dev_get_drvdata(dev); 303 struct shmob_drm_device *sdev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
new file mode 100644
index 000000000000..2d9d4252d598
--- /dev/null
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -0,0 +1,14 @@
1config DRM_STI
2 tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
3 depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER
5 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_CMA_HELPER
7 help
8 Choose this option to enable DRM on STM stiH41x chipset
9
10config DRM_STI_FBDEV
11 bool "DRM frame buffer device for STMicroelectronics SoC stiH41x Series"
12 depends on DRM_STI
13 help
14 Choose this option to enable FBDEV on top of DRM for STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
new file mode 100644
index 000000000000..04ac2ceef27f
--- /dev/null
+++ b/drivers/gpu/drm/sti/Makefile
@@ -0,0 +1,21 @@
1sticompositor-y := \
2 sti_layer.o \
3 sti_mixer.o \
4 sti_gdp.o \
5 sti_vid.o \
6 sti_compositor.o \
7 sti_drm_crtc.o \
8 sti_drm_plane.o
9
10stihdmi-y := sti_hdmi.o \
11 sti_hdmi_tx3g0c55phy.o \
12 sti_hdmi_tx3g4c28phy.o \
13
14obj-$(CONFIG_DRM_STI) = \
15 sti_vtg.o \
16 sti_vtac.o \
17 stihdmi.o \
18 sti_hda.o \
19 sti_tvout.o \
20 sticompositor.o \
21 sti_drm_drv.o \ No newline at end of file
diff --git a/drivers/gpu/drm/sti/NOTES b/drivers/gpu/drm/sti/NOTES
new file mode 100644
index 000000000000..57e257969198
--- /dev/null
+++ b/drivers/gpu/drm/sti/NOTES
@@ -0,0 +1,58 @@
11. stiH display hardware IP
2---------------------------
3The STMicroelectronics stiH SoCs use a common chain of HW display IP blocks:
4- The High Quality Video Display Processor (HQVDP) gets video frames from a
5 video decoder and does high quality video processing, including scaling.
6
7- The Compositor is a multiplane, dual-mixer (Main & Aux) digital processor. It
8 has several inputs:
9 - The graphics planes are internally processed by the Generic Display
10 Pipeline (GDP).
11 - The video plug (VID) connects to the HQVDP output.
12 - The cursor handles ... a cursor.
13- The TV OUT pre-formats (converts, clips and rounds) the compositor output data
14- The HDMI / DVO / HD Analog / SD analog IP builds the video signals
15 - DVO (Digital Video Output) handles a 24-bit parallel signal
16 - The HD analog signal is typically driven by a YCbCr cable, supporting up to
17 1080i mode.
18 - The SD analog signal is typically used for legacy TV
19- The VTG (Video Timing Generators) build the Vsync signals used by the other HW IPs
20Note that some stiH drivers support only a subset of these HW IPs.
21
22 .-------------. .-----------. .-----------.
23GPU >-------------+GDP Main | | +---+ HDMI +--> HDMI
24GPU >-------------+GDP mixer+---+ | :===========:
25GPU >-------------+Cursor | | +---+ DVO +--> 24b//
26 ------- | COMPOSITOR | | TV OUT | :===========:
27 | | | | | +---+ HD analog +--> YCbCr
28Vid >--+ HQVDP +--+VID Aux +---+ | :===========:
29dec | | | mixer| | +---+ SD analog +--> CVBS
30 '-------' '-------------' '-----------' '-----------'
31 .-----------.
32 | main+--> Vsync
33 | VTG |
34 | aux+--> Vsync
35 '-----------'
36
372. DRM / HW mapping
38-------------------
39These IPs are mapped to DRM objects as follows:
40- The CRTCs are mapped to the Compositor Main and Aux Mixers
41- The Framebuffers and planes are mapped to the Compositor GDP (non video
42 buffers) and to HQVDP+VID (video buffers)
43- The Cursor is mapped to the Compositor Cursor
44- The Encoders are mapped to the TVOut
45- The Bridges/Connectors are mapped to the HDMI / DVO / HD Analog / SD analog
46
47FB & planes Cursor CRTC Encoders Bridges/Connectors
48 | | | | |
49 | | | | |
50 | .-------------. | .-----------. .-----------. |
51 +------------> |GDP | Main | | | +-> | | HDMI | <-+
52 +------------> |GDP v mixer|<+ | | | :===========: |
53 | |Cursor | | | +-> | | DVO | <-+
54 | ------- | COMPOSITOR | | |TV OUT | | :===========: |
55 | | | | | | | +-> | | HD analog | <-+
56 +-> | HQVDP | |VID Aux |<+ | | | :===========: |
57 | | | mixer| | +-> | | SD analog | <-+
58 '-------' '-------------' '-----------' '-----------'
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
new file mode 100644
index 000000000000..390d93e9a06c
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -0,0 +1,281 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <linux/component.h>
10#include <linux/module.h>
11#include <linux/platform_device.h>
12#include <linux/reset.h>
13
14#include <drm/drmP.h>
15
16#include "sti_compositor.h"
17#include "sti_drm_crtc.h"
18#include "sti_drm_drv.h"
19#include "sti_drm_plane.h"
20#include "sti_gdp.h"
21#include "sti_vtg.h"
22
23/*
24 * stiH407 compositor properties
25 */
26struct sti_compositor_data stih407_compositor_data = {
27 .nb_subdev = 6,
28 .subdev_desc = {
29 {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
30 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
31 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
32 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
33 {STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
34 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
35 },
36};
37
38/*
39 * stiH416 compositor properties
40 * Note:
41 * on stih416, MIXER_AUX has a different base address from MIXER_MAIN.
42 * Moreover, GDPx is different for Main and Aux Mixer. So this subdev map does
43 * not fit for stiH416 if we want to enable the MIXER_AUX.
44 */
45struct sti_compositor_data stih416_compositor_data = {
46 .nb_subdev = 3,
47 .subdev_desc = {
48 {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
49 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
50 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
51 },
52};
53
54static int sti_compositor_init_subdev(struct sti_compositor *compo,
55 struct sti_compositor_subdev_descriptor *desc,
56 unsigned int array_size)
57{
58 unsigned int i, mixer_id = 0, layer_id = 0;
59
60 for (i = 0; i < array_size; i++) {
61 switch (desc[i].type) {
62 case STI_MIXER_MAIN_SUBDEV:
63 case STI_MIXER_AUX_SUBDEV:
64 compo->mixer[mixer_id++] =
65 sti_mixer_create(compo->dev, desc[i].id,
66 compo->regs + desc[i].offset);
67 break;
68 case STI_GPD_SUBDEV:
69 case STI_VID_SUBDEV:
70 compo->layer[layer_id++] =
71 sti_layer_create(compo->dev, desc[i].id,
72 compo->regs + desc[i].offset);
73 break;
74 /* case STI_CURSOR_SUBDEV : TODO */
75 default:
76 DRM_ERROR("Unknown subdev component type\n");
77 return 1;
78 }
79
80 }
81 compo->nb_mixers = mixer_id;
82 compo->nb_layers = layer_id;
83
84 return 0;
85}
86
87static int sti_compositor_bind(struct device *dev, struct device *master,
88 void *data)
89{
90 struct sti_compositor *compo = dev_get_drvdata(dev);
91 struct drm_device *drm_dev = data;
92 unsigned int i, crtc = 0, plane = 0;
93 struct sti_drm_private *dev_priv = drm_dev->dev_private;
94 struct drm_plane *cursor = NULL;
95 struct drm_plane *primary = NULL;
96
97 dev_priv->compo = compo;
98
99 for (i = 0; i < compo->nb_layers; i++) {
100 if (compo->layer[i]) {
101 enum sti_layer_desc desc = compo->layer[i]->desc;
102 enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
103 enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
104
105 if (compo->mixer[crtc])
106 plane_type = DRM_PLANE_TYPE_PRIMARY;
107
108 switch (type) {
109 case STI_CUR:
110 cursor = sti_drm_plane_init(drm_dev,
111 compo->layer[i],
112 (1 << crtc) - 1,
113 DRM_PLANE_TYPE_CURSOR);
114 break;
115 case STI_GDP:
116 case STI_VID:
117 primary = sti_drm_plane_init(drm_dev,
118 compo->layer[i],
119 (1 << crtc) - 1, plane_type);
120 plane++;
121 break;
122 case STI_BCK:
123 break;
124 }
125
126 /* The first planes are reserved for primary planes */
127 if (compo->mixer[crtc]) {
128 sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
129 primary, cursor);
130 crtc++;
131 cursor = NULL;
132 }
133 }
134 }
135
136 drm_vblank_init(drm_dev, crtc);
137 /* Allow usage of vblank without having to call drm_irq_install */
138 drm_dev->irq_enabled = 1;
139
140 DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
141 crtc, plane);
142 DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
143
144 return 0;
145}
146
147static void sti_compositor_unbind(struct device *dev, struct device *master,
148 void *data)
149{
150 /* do nothing */
151}
152
153static const struct component_ops sti_compositor_ops = {
154 .bind = sti_compositor_bind,
155 .unbind = sti_compositor_unbind,
156};
157
158static const struct of_device_id compositor_of_match[] = {
159 {
160 .compatible = "st,stih416-compositor",
161 .data = &stih416_compositor_data,
162 }, {
163 .compatible = "st,stih407-compositor",
164 .data = &stih407_compositor_data,
165 }, {
166 /* end node */
167 }
168};
169MODULE_DEVICE_TABLE(of, compositor_of_match);
170
171static int sti_compositor_probe(struct platform_device *pdev)
172{
173 struct device *dev = &pdev->dev;
174 struct device_node *np = dev->of_node;
175 struct device_node *vtg_np;
176 struct sti_compositor *compo;
177 struct resource *res;
178 int err;
179
180 compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
181 if (!compo) {
182 DRM_ERROR("Failed to allocate compositor context\n");
183 return -ENOMEM;
184 }
185 compo->dev = dev;
186 compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
187
188 /* populate data structure depending on compatibility */
189 BUG_ON(!of_match_node(compositor_of_match, np)->data);
190
191 memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
192 sizeof(struct sti_compositor_data));
193
194 /* Get memory resources */
195 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
196 if (res == NULL) {
197 DRM_ERROR("Get memory resource failed\n");
198 return -ENXIO;
199 }
200 compo->regs = devm_ioremap(dev, res->start, resource_size(res));
201 if (compo->regs == NULL) {
202 DRM_ERROR("Register mapping failed\n");
203 return -ENXIO;
204 }
205
206 /* Get clock resources */
207 compo->clk_compo_main = devm_clk_get(dev, "compo_main");
208 if (IS_ERR(compo->clk_compo_main)) {
209 DRM_ERROR("Cannot get compo_main clock\n");
210 return PTR_ERR(compo->clk_compo_main);
211 }
212
213 compo->clk_compo_aux = devm_clk_get(dev, "compo_aux");
214 if (IS_ERR(compo->clk_compo_aux)) {
215 DRM_ERROR("Cannot get compo_aux clock\n");
216 return PTR_ERR(compo->clk_compo_aux);
217 }
218
219 compo->clk_pix_main = devm_clk_get(dev, "pix_main");
220 if (IS_ERR(compo->clk_pix_main)) {
221 DRM_ERROR("Cannot get pix_main clock\n");
222 return PTR_ERR(compo->clk_pix_main);
223 }
224
225 compo->clk_pix_aux = devm_clk_get(dev, "pix_aux");
226 if (IS_ERR(compo->clk_pix_aux)) {
227 DRM_ERROR("Cannot get pix_aux clock\n");
228 return PTR_ERR(compo->clk_pix_aux);
229 }
230
231 /* Get reset resources */
232 compo->rst_main = devm_reset_control_get(dev, "compo-main");
233 /* Take compo main out of reset */
234 if (!IS_ERR(compo->rst_main))
235 reset_control_deassert(compo->rst_main);
236
237 compo->rst_aux = devm_reset_control_get(dev, "compo-aux");
238 /* Take compo aux out of reset */
239 if (!IS_ERR(compo->rst_aux))
240 reset_control_deassert(compo->rst_aux);
241
242 vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
243 if (vtg_np)
244 compo->vtg_main = of_vtg_find(vtg_np);
245
246 vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
247 if (vtg_np)
248 compo->vtg_aux = of_vtg_find(vtg_np);
249
250 /* Initialize compositor subdevices */
251 err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
252 compo->data.nb_subdev);
253 if (err)
254 return err;
255
256 platform_set_drvdata(pdev, compo);
257
258 return component_add(&pdev->dev, &sti_compositor_ops);
259}
260
261static int sti_compositor_remove(struct platform_device *pdev)
262{
263 component_del(&pdev->dev, &sti_compositor_ops);
264 return 0;
265}
266
267static struct platform_driver sti_compositor_driver = {
268 .driver = {
269 .name = "sti-compositor",
270 .owner = THIS_MODULE,
271 .of_match_table = compositor_of_match,
272 },
273 .probe = sti_compositor_probe,
274 .remove = sti_compositor_remove,
275};
276
277module_platform_driver(sti_compositor_driver);
278
279MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
280MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
281MODULE_LICENSE("GPL");
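[ Ed. sti_compositor.c implements only the slave side of the component framework: component_add() registers the device, and the real work runs in the bind() callback once the DRM master has aggregated all components. For orientation, a hypothetical master-side counterpart is sketched below; the actual STI master lives in sti_drm_drv.c (not part of this hunk), and sti_master_ops is a stand-in name: ]

static int compare_of(struct device *dev, void *data)
{
	/* match a registered component by its device-tree node */
	return dev->of_node == data;
}

static int sti_platform_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *child;

	/* queue up every child node as an expected component */
	for_each_child_of_node(pdev->dev.of_node, child)
		component_match_add(&pdev->dev, &match, compare_of, child);

	/* bind() of each component runs once all of them have probed */
	return component_master_add_with_match(&pdev->dev,
					       &sti_master_ops, match);
}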
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
new file mode 100644
index 000000000000..3ea19db72e0f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -0,0 +1,90 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef _STI_COMPOSITOR_H_
10#define _STI_COMPOSITOR_H_
11
12#include <linux/clk.h>
13#include <linux/kernel.h>
14
15#include "sti_layer.h"
16#include "sti_mixer.h"
17
18#define WAIT_NEXT_VSYNC_MS 50 /*ms*/
19
20#define STI_MAX_LAYER 8
21#define STI_MAX_MIXER 2
22
23enum sti_compositor_subdev_type {
24 STI_MIXER_MAIN_SUBDEV,
25 STI_MIXER_AUX_SUBDEV,
26 STI_GPD_SUBDEV,
27 STI_VID_SUBDEV,
28 STI_CURSOR_SUBDEV,
29};
30
31struct sti_compositor_subdev_descriptor {
32 enum sti_compositor_subdev_type type;
33 int id;
34 unsigned int offset;
35};
36
37/**
38 * STI Compositor data structure
39 *
40 * @nb_subdev: number of subdevices supported by the compositor
41 * @subdev_desc: subdev list description
42 */
43#define MAX_SUBDEV 9
44struct sti_compositor_data {
45 unsigned int nb_subdev;
46 struct sti_compositor_subdev_descriptor subdev_desc[MAX_SUBDEV];
47};
48
49/**
50 * STI Compositor structure
51 *
52 * @dev: driver device
53 * @regs: registers (main)
54 * @data: device data
55 * @clk_compo_main: clock for main compo
56 * @clk_compo_aux: clock for aux compo
57 * @clk_pix_main: pixel clock for main path
58 * @clk_pix_aux: pixel clock for aux path
59 * @rst_main: reset control of the main path
60 * @rst_aux: reset control of the aux path
61 * @mixer: array of mixers
62 * @vtg_main: vtg for main data path
63 * @vtg_aux: vtg for auxiliary data path
64 * @layer: array of layers
65 * @nb_mixers: number of mixers for this compositor
66 * @nb_layers: number of layers (GDP,VID,...) for this compositor
67 * @enable: true if the compositor is enabled, false otherwise
68 * @vtg_vblank_nb: callback for VTG VSYNC notification
69 */
70struct sti_compositor {
71 struct device *dev;
72 void __iomem *regs;
73 struct sti_compositor_data data;
74 struct clk *clk_compo_main;
75 struct clk *clk_compo_aux;
76 struct clk *clk_pix_main;
77 struct clk *clk_pix_aux;
78 struct reset_control *rst_main;
79 struct reset_control *rst_aux;
80 struct sti_mixer *mixer[STI_MAX_MIXER];
81 struct sti_vtg *vtg_main;
82 struct sti_vtg *vtg_aux;
83 struct sti_layer *layer[STI_MAX_LAYER];
84 int nb_mixers;
85 int nb_layers;
86 bool enable;
87 struct notifier_block vtg_vblank_nb;
88};
89
90#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
new file mode 100644
index 000000000000..d2ae0c0e13be
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -0,0 +1,421 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <linux/clk.h>
10
11#include <drm/drmP.h>
12#include <drm/drm_crtc_helper.h>
13
14#include "sti_compositor.h"
15#include "sti_drm_drv.h"
16#include "sti_drm_crtc.h"
17#include "sti_vtg.h"
18
19static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
20{
21 DRM_DEBUG_KMS("\n");
22}
23
24static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
25{
26 struct sti_mixer *mixer = to_sti_mixer(crtc);
27 struct device *dev = mixer->dev;
28 struct sti_compositor *compo = dev_get_drvdata(dev);
29
30 compo->enable = true;
31
32 /* Prepare and enable the compo IP clock */
33 if (mixer->id == STI_MIXER_MAIN) {
34 if (clk_prepare_enable(compo->clk_compo_main))
35 DRM_INFO("Failed to prepare/enable compo_main clk\n");
36 } else {
37 if (clk_prepare_enable(compo->clk_compo_aux))
38 DRM_INFO("Failed to prepare/enable compo_aux clk\n");
39 }
40}
41
42static void sti_drm_crtc_commit(struct drm_crtc *crtc)
43{
44 struct sti_mixer *mixer = to_sti_mixer(crtc);
45 struct device *dev = mixer->dev;
46 struct sti_compositor *compo = dev_get_drvdata(dev);
47 struct sti_layer *layer;
48
49 if (!mixer || !compo) {
50 DRM_ERROR("Can not find mixer or compositor\n");
51 return;
52 }
53
54 /* get GDP which is reserved to the CRTC FB */
55 layer = to_sti_layer(crtc->primary);
56 if (layer)
57 sti_layer_commit(layer);
58 else
59 DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
60
61 /* Enable layer on mixer */
62 if (sti_mixer_set_layer_status(mixer, layer, true))
63 DRM_ERROR("Can not enable layer at mixer\n");
64}
65
66static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
67 const struct drm_display_mode *mode,
68 struct drm_display_mode *adjusted_mode)
69{
70 /* accept the provided drm_display_mode, do not fix it up */
71 return true;
72}
73
74static int
75sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
76 struct drm_display_mode *adjusted_mode, int x, int y,
77 struct drm_framebuffer *old_fb)
78{
79 struct sti_mixer *mixer = to_sti_mixer(crtc);
80 struct device *dev = mixer->dev;
81 struct sti_compositor *compo = dev_get_drvdata(dev);
82 struct sti_layer *layer;
83 struct clk *clk;
84 int rate = mode->clock * 1000;
85 int res;
86 unsigned int w, h;
87
88 DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d mode:%d (%s)\n",
89 crtc->base.id, sti_mixer_to_str(mixer),
90 crtc->primary->fb->base.id, mode->base.id, mode->name);
91
92 DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
93 mode->vrefresh, mode->clock,
94 mode->hdisplay,
95 mode->hsync_start, mode->hsync_end,
96 mode->htotal,
97 mode->vdisplay,
98 mode->vsync_start, mode->vsync_end,
99 mode->vtotal, mode->type, mode->flags);
100
101 /* Set rate and prepare/enable pixel clock */
102 if (mixer->id == STI_MIXER_MAIN)
103 clk = compo->clk_pix_main;
104 else
105 clk = compo->clk_pix_aux;
106
107 res = clk_set_rate(clk, rate);
108 if (res < 0) {
109 DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
110 return -EINVAL;
111 }
112 if (clk_prepare_enable(clk)) {
113 DRM_ERROR("Failed to prepare/enable pix clk\n");
114 return -EINVAL;
115 }
116
117 sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
118 compo->vtg_main : compo->vtg_aux, &crtc->mode);
119
120 /* a GDP is reserved to the CRTC FB */
121 layer = to_sti_layer(crtc->primary);
122 if (!layer) {
123 DRM_ERROR("Can not find GDP0\n");
124 return -EINVAL;
125 }
126
127 /* copy the mode data adjusted by mode_fixup() into crtc->mode
128 * so that the hardware can be set to the proper mode
129 */
130 memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
131
132 res = sti_mixer_set_layer_depth(mixer, layer);
133 if (res) {
134 DRM_ERROR("Can not set layer depth\n");
135 return -EINVAL;
136 }
137 res = sti_mixer_active_video_area(mixer, &crtc->mode);
138 if (res) {
139 DRM_ERROR("Can not set active video area\n");
140 return -EINVAL;
141 }
142
143 w = crtc->primary->fb->width - x;
144 h = crtc->primary->fb->height - y;
145
146 return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
147 mixer->id, 0, 0, w, h, x, y, w, h);
148}
149
150static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
151 struct drm_framebuffer *old_fb)
152{
153 struct sti_mixer *mixer = to_sti_mixer(crtc);
154 struct sti_layer *layer;
155 unsigned int w, h;
156 int ret;
157
158 DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d (%d,%d)\n",
159 crtc->base.id, sti_mixer_to_str(mixer),
160 crtc->primary->fb->base.id, x, y);
161
162 /* GDP is reserved to the CRTC FB */
163 layer = to_sti_layer(crtc->primary);
164 if (!layer) {
165 DRM_ERROR("Can not find GDP0\n");
166 ret = -EINVAL;
167 goto out;
168 }
169
170 w = crtc->primary->fb->width - crtc->x;
171 h = crtc->primary->fb->height - crtc->y;
172
173 ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
174 mixer->id, 0, 0, w, h,
175 crtc->x, crtc->y, w, h);
176 if (ret) {
177 DRM_ERROR("Can not prepare layer\n");
178 goto out;
179 }
180
181 sti_drm_crtc_commit(crtc);
182out:
183 return ret;
184}
185
186static void sti_drm_crtc_load_lut(struct drm_crtc *crtc)
187{
188 /* do nothing */
189}
190
191static void sti_drm_crtc_disable(struct drm_crtc *crtc)
192{
193 struct sti_mixer *mixer = to_sti_mixer(crtc);
194 struct device *dev = mixer->dev;
195 struct sti_compositor *compo = dev_get_drvdata(dev);
196 struct sti_layer *layer;
197
198 if (!compo->enable)
199 return;
200
201 DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
202
203 /* Disable Background */
204 sti_mixer_set_background_status(mixer, false);
205
206 /* Disable GDP */
207 layer = to_sti_layer(crtc->primary);
208 if (!layer) {
209 DRM_ERROR("Cannot find GDP0\n");
210 return;
211 }
212
213 /* Disable layer at mixer level */
214 if (sti_mixer_set_layer_status(mixer, layer, false))
215 DRM_ERROR("Can not disable %s layer at mixer\n",
216 sti_layer_to_str(layer));
217
218 /* Wait a while to be sure that a Vsync event is received */
219 msleep(WAIT_NEXT_VSYNC_MS);
220
221 /* Then disable layer itself */
222 sti_layer_disable(layer);
223
224 drm_vblank_off(crtc->dev, mixer->id);
225
226 /* Disable pixel clock and compo IP clocks */
227 if (mixer->id == STI_MIXER_MAIN) {
228 clk_disable_unprepare(compo->clk_pix_main);
229 clk_disable_unprepare(compo->clk_compo_main);
230 } else {
231 clk_disable_unprepare(compo->clk_pix_aux);
232 clk_disable_unprepare(compo->clk_compo_aux);
233 }
234
235 compo->enable = false;
236}
237
238static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
239 .dpms = sti_drm_crtc_dpms,
240 .prepare = sti_drm_crtc_prepare,
241 .commit = sti_drm_crtc_commit,
242 .mode_fixup = sti_drm_crtc_mode_fixup,
243 .mode_set = sti_drm_crtc_mode_set,
244 .mode_set_base = sti_drm_crtc_mode_set_base,
245 .load_lut = sti_drm_crtc_load_lut,
246 .disable = sti_drm_crtc_disable,
247};
248
249static int sti_drm_crtc_page_flip(struct drm_crtc *crtc,
250 struct drm_framebuffer *fb,
251 struct drm_pending_vblank_event *event,
252 uint32_t page_flip_flags)
253{
254 struct drm_device *drm_dev = crtc->dev;
255 struct drm_framebuffer *old_fb;
256 struct sti_mixer *mixer = to_sti_mixer(crtc);
257 unsigned long flags;
258 int ret;
259
260 DRM_DEBUG_KMS("fb %d --> fb %d\n",
261 crtc->primary->fb->base.id, fb->base.id);
262
263 mutex_lock(&drm_dev->struct_mutex);
264
265 old_fb = crtc->primary->fb;
266 crtc->primary->fb = fb;
267 ret = sti_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
268 if (ret) {
269 DRM_ERROR("failed\n");
270 crtc->primary->fb = old_fb;
271 goto out;
272 }
273
274 if (event) {
275 event->pipe = mixer->id;
276
277 ret = drm_vblank_get(drm_dev, event->pipe);
278 if (ret) {
279 DRM_ERROR("Cannot get vblank\n");
280 goto out;
281 }
282
283 spin_lock_irqsave(&drm_dev->event_lock, flags);
284 if (mixer->pending_event) {
285 drm_vblank_put(drm_dev, event->pipe);
286 ret = -EBUSY;
287 } else {
288 mixer->pending_event = event;
289 }
290 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
291 }
292out:
293 mutex_unlock(&drm_dev->struct_mutex);
294 return ret;
295}
296
297static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
298{
299 DRM_DEBUG_KMS("\n");
300 drm_crtc_cleanup(crtc);
301}
302
303static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
304 struct drm_property *property,
305 uint64_t val)
306{
307 DRM_DEBUG_KMS("\n");
308 return 0;
309}
310
311int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
312 unsigned long event, void *data)
313{
314 struct drm_device *drm_dev;
315 struct sti_compositor *compo =
316 container_of(nb, struct sti_compositor, vtg_vblank_nb);
317 int *crtc = data;
318 unsigned long flags;
319 struct sti_drm_private *priv;
320
321 drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
322 priv = drm_dev->dev_private;
323
324 if ((event != VTG_TOP_FIELD_EVENT) &&
325 (event != VTG_BOTTOM_FIELD_EVENT)) {
326 DRM_ERROR("unknown event: %lu\n", event);
327 return -EINVAL;
328 }
329
330 drm_handle_vblank(drm_dev, *crtc);
331
332 spin_lock_irqsave(&drm_dev->event_lock, flags);
333 if (compo->mixer[*crtc]->pending_event) {
334 drm_send_vblank_event(drm_dev, -1,
335 compo->mixer[*crtc]->pending_event);
336 drm_vblank_put(drm_dev, *crtc);
337 compo->mixer[*crtc]->pending_event = NULL;
338 }
339 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
340
341 return 0;
342}
343
344int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
345{
346 struct sti_drm_private *dev_priv = dev->dev_private;
347 struct sti_compositor *compo = dev_priv->compo;
348 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
349
350 if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
351 compo->vtg_main : compo->vtg_aux,
352 vtg_vblank_nb, crtc)) {
353 DRM_ERROR("Cannot register VTG notifier\n");
354 return -EINVAL;
355 }
356
357 return 0;
358}
359EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
360
361void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
362{
363 struct sti_drm_private *priv = dev->dev_private;
364 struct sti_compositor *compo = priv->compo;
365 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
366 unsigned long flags;
367
368 DRM_DEBUG_DRIVER("\n");
369
370 if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ?
371 compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
372 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
373
374 /* free the resources of the pending requests */
375 spin_lock_irqsave(&dev->event_lock, flags);
376 if (compo->mixer[crtc]->pending_event) {
377 drm_vblank_put(dev, crtc);
378 compo->mixer[crtc]->pending_event = NULL;
379 }
380 spin_unlock_irqrestore(&dev->event_lock, flags);
381
382}
383EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
384
385static struct drm_crtc_funcs sti_crtc_funcs = {
386 .set_config = drm_crtc_helper_set_config,
387 .page_flip = sti_drm_crtc_page_flip,
388 .destroy = sti_drm_crtc_destroy,
389 .set_property = sti_drm_crtc_set_property,
390};
391
392bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
393{
394 struct sti_mixer *mixer = to_sti_mixer(crtc);
395
396 if (mixer->id == STI_MIXER_MAIN)
397 return true;
398
399 return false;
400}
401
402int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
403 struct drm_plane *primary, struct drm_plane *cursor)
404{
405 struct drm_crtc *crtc = &mixer->drm_crtc;
406 int res;
407
408 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
409 &sti_crtc_funcs);
410 if (res) {
411 DRM_ERROR("Can not initialize CRTC\n");
412 return -EINVAL;
413 }
414
415 drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs);
416
417 DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n",
418 crtc->base.id, sti_mixer_to_str(mixer));
419
420 return 0;
421}
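
[ Ed. sketch, not part of the patch: the page-flip path above queues a
drm_pending_vblank_event under event_lock, and sti_drm_crtc_vblank_cb()
completes it on the next VTG VSYNC. From userspace the round trip looks
like the following minimal libdrm program; only the generic libdrm API
is used, nothing STI-specific is assumed. ]

#include <stdint.h>
#include <poll.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void flip_done(int fd, unsigned int sequence, unsigned int tv_sec,
		      unsigned int tv_usec, void *user_data)
{
	*(int *)user_data = 1;	/* the kernel completed our pending event */
}

static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	int done = 0;
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = flip_done,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* queue the flip; the event is delivered on the next VSYNC */
	if (drmModePageFlip(fd, crtc_id, fb_id,
			    DRM_MODE_PAGE_FLIP_EVENT, &done))
		return -1;

	while (!done && poll(&pfd, 1, -1) >= 0)
		drmHandleEvent(fd, &evctx);	/* dispatches flip_done() */

	return 0;
}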
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
new file mode 100644
index 000000000000..caca8b14f017
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_DRM_CRTC_H_
8#define _STI_DRM_CRTC_H_
9
10#include <drm/drmP.h>
11
12struct sti_mixer;
13
14int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
15 struct drm_plane *primary, struct drm_plane *cursor);
16int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
17void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
18int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
19 unsigned long event, void *data);
20bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
21
22#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
new file mode 100644
index 000000000000..a7cc24917a96
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -0,0 +1,241 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include <drm/drmP.h>
8
9#include <linux/component.h>
10#include <linux/debugfs.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/of_platform.h>
14
15#include <drm/drm_crtc_helper.h>
16#include <drm/drm_gem_cma_helper.h>
17#include <drm/drm_fb_cma_helper.h>
18
19#include "sti_drm_drv.h"
20#include "sti_drm_crtc.h"
21
22#define DRIVER_NAME "sti"
23#define DRIVER_DESC "STMicroelectronics SoC DRM"
24#define DRIVER_DATE "20140601"
25#define DRIVER_MAJOR 1
26#define DRIVER_MINOR 0
27
28#define STI_MAX_FB_HEIGHT 4096
29#define STI_MAX_FB_WIDTH 4096
30
31static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
32 .fb_create = drm_fb_cma_create,
33};
34
35static void sti_drm_mode_config_init(struct drm_device *dev)
36{
37 dev->mode_config.min_width = 0;
38 dev->mode_config.min_height = 0;
39
40 /*
41 * Set max width and height as default values.
42 * These values are used to check the framebuffer size
43 * limits in drm_mode_addfb().
44 */
45 dev->mode_config.max_width = STI_MAX_FB_WIDTH;
46 dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
47
48 dev->mode_config.funcs = &sti_drm_mode_config_funcs;
49}
50
51static int sti_drm_load(struct drm_device *dev, unsigned long flags)
52{
53 struct sti_drm_private *private;
54 int ret;
55
56 private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
57 if (!private) {
58 DRM_ERROR("Failed to allocate private\n");
59 return -ENOMEM;
60 }
61 dev->dev_private = private;
62 private->drm_dev = dev;
63
64 drm_mode_config_init(dev);
65 drm_kms_helper_poll_init(dev);
66
67 sti_drm_mode_config_init(dev);
68
69 ret = component_bind_all(dev->dev, dev);
70 if (ret)
71 return ret;
72
73 drm_helper_disable_unused_functions(dev);
74
75#ifdef CONFIG_DRM_STI_FBDEV
76 drm_fbdev_cma_init(dev, 32,
77 dev->mode_config.num_crtc,
78 dev->mode_config.num_connector);
79#endif
80 return 0;
81}
82
83static const struct file_operations sti_drm_driver_fops = {
84 .owner = THIS_MODULE,
85 .open = drm_open,
86 .mmap = drm_gem_cma_mmap,
87 .poll = drm_poll,
88 .read = drm_read,
89 .unlocked_ioctl = drm_ioctl,
90#ifdef CONFIG_COMPAT
91 .compat_ioctl = drm_compat_ioctl,
92#endif
93 .release = drm_release,
94};
95
96static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
97 struct drm_gem_object *obj,
98 int flags)
99{
100 /* we want to be able to write to the mmapped buffer */
101 flags |= O_RDWR;
102 return drm_gem_prime_export(dev, obj, flags);
103}
104
105static struct drm_driver sti_drm_driver = {
106 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
107 DRIVER_GEM | DRIVER_PRIME,
108 .load = sti_drm_load,
109 .gem_free_object = drm_gem_cma_free_object,
110 .gem_vm_ops = &drm_gem_cma_vm_ops,
111 .dumb_create = drm_gem_cma_dumb_create,
112 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
113 .dumb_destroy = drm_gem_dumb_destroy,
114 .fops = &sti_drm_driver_fops,
115
116 .get_vblank_counter = drm_vblank_count,
117 .enable_vblank = sti_drm_crtc_enable_vblank,
118 .disable_vblank = sti_drm_crtc_disable_vblank,
119
120 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
121 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
122 .gem_prime_export = sti_drm_gem_prime_export,
123 .gem_prime_import = drm_gem_prime_import,
124 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
125 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
126 .gem_prime_vmap = drm_gem_cma_prime_vmap,
127 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
128 .gem_prime_mmap = drm_gem_cma_prime_mmap,
129
130 .name = DRIVER_NAME,
131 .desc = DRIVER_DESC,
132 .date = DRIVER_DATE,
133 .major = DRIVER_MAJOR,
134 .minor = DRIVER_MINOR,
135};
136
137static int compare_of(struct device *dev, void *data)
138{
139 return dev->of_node == data;
140}
141
142static int sti_drm_bind(struct device *dev)
143{
144 return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
145}
146
147static void sti_drm_unbind(struct device *dev)
148{
149 drm_put_dev(dev_get_drvdata(dev));
150}
151
152static const struct component_master_ops sti_drm_ops = {
153 .bind = sti_drm_bind,
154 .unbind = sti_drm_unbind,
155};
156
157static int sti_drm_master_probe(struct platform_device *pdev)
158{
159 struct device *dev = &pdev->dev;
160 struct device_node *node = dev->parent->of_node;
161 struct device_node *child_np;
162 struct component_match *match = NULL;
163
164 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
165
166 child_np = of_get_next_available_child(node, NULL);
167
168 while (child_np) {
169 component_match_add(dev, &match, compare_of, child_np);
170 of_node_put(child_np);
171 child_np = of_get_next_available_child(node, child_np);
172 }
173
174 return component_master_add_with_match(dev, &sti_drm_ops, match);
175}
176
177static int sti_drm_master_remove(struct platform_device *pdev)
178{
179 component_master_del(&pdev->dev, &sti_drm_ops);
180 return 0;
181}
182
183static struct platform_driver sti_drm_master_driver = {
184 .probe = sti_drm_master_probe,
185 .remove = sti_drm_master_remove,
186 .driver = {
187 .owner = THIS_MODULE,
188 .name = DRIVER_NAME "__master",
189 },
190};
191
192static int sti_drm_platform_probe(struct platform_device *pdev)
193{
194 struct device *dev = &pdev->dev;
195 struct device_node *node = dev->of_node;
196 struct platform_device *master;
197
198 of_platform_populate(node, NULL, NULL, dev);
199
200 platform_driver_register(&sti_drm_master_driver);
201 master = platform_device_register_resndata(dev,
202 DRIVER_NAME "__master", -1,
203 NULL, 0, NULL, 0);
204 if (IS_ERR(master))
205 return PTR_ERR(master);
206
207 platform_set_drvdata(pdev, master);
208 return 0;
209}
210
211static int sti_drm_platform_remove(struct platform_device *pdev)
212{
213 struct platform_device *master = platform_get_drvdata(pdev);
214
215 of_platform_depopulate(&pdev->dev);
216 platform_device_unregister(master);
217 platform_driver_unregister(&sti_drm_master_driver);
218 return 0;
219}
220
221static const struct of_device_id sti_drm_dt_ids[] = {
222 { .compatible = "st,sti-display-subsystem", },
223 { /* end node */ },
224};
225MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
226
227static struct platform_driver sti_drm_platform_driver = {
228 .probe = sti_drm_platform_probe,
229 .remove = sti_drm_platform_remove,
230 .driver = {
231 .owner = THIS_MODULE,
232 .name = DRIVER_NAME,
233 .of_match_table = sti_drm_dt_ids,
234 },
235};
236
237module_platform_driver(sti_drm_platform_driver);
238
239MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
240MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
241MODULE_LICENSE("GPL");
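
[ Ed. sketch, not part of the patch: the master driver above only
collects DT children into a component match list; each child driver
(the compositor earlier in this series, tvout, hda, ...) announces
itself with component_add(). A hypothetical child, with illustrative
"example_" names, would look like this; note that the void *data passed
to bind() is the drm_device handed to component_bind_all() in
sti_drm_load(). ]

#include <linux/component.h>
#include <linux/platform_device.h>

#include <drm/drmP.h>

static int example_bind(struct device *dev, struct device *master,
			void *data)
{
	struct drm_device *drm_dev = data;

	/* register CRTCs/planes/encoders against drm_dev here */
	dev_set_drvdata(dev, drm_dev);
	return 0;
}

static void example_unbind(struct device *dev, struct device *master,
			   void *data)
{
	/* tear down whatever example_bind() created */
}

static const struct component_ops example_ops = {
	.bind = example_bind,
	.unbind = example_unbind,
};

static int example_probe(struct platform_device *pdev)
{
	/* defer the real work until the master binds all components */
	return component_add(&pdev->dev, &example_ops);
}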
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h
new file mode 100644
index 000000000000..ec5e2eb8dff9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_drv.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_DRM_DRV_H_
8#define _STI_DRM_DRV_H_
9
10#include <drm/drmP.h>
11
12struct sti_compositor;
13struct sti_tvout;
14
15/**
16 * STI drm private structure
17 * This structure is stored as private in the drm_device
18 *
19 * @compo: compositor
20 * @plane_zorder_property: z-order property for CRTC planes
21 * @drm_dev: drm device
22 */
23struct sti_drm_private {
24 struct sti_compositor *compo;
25 struct drm_property *plane_zorder_property;
26 struct drm_device *drm_dev;
27};
28
29#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
new file mode 100644
index 000000000000..f4118d4cac22
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -0,0 +1,195 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include "sti_compositor.h"
10#include "sti_drm_drv.h"
11#include "sti_drm_plane.h"
12#include "sti_vtg.h"
13
14enum sti_layer_desc sti_layer_default_zorder[] = {
15 STI_GDP_0,
16 STI_VID_0,
17 STI_GDP_1,
18 STI_VID_1,
19 STI_GDP_2,
20 STI_GDP_3,
21};
22
23 /* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (Foreground) */
24
25static int
26sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
27 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
28 unsigned int crtc_w, unsigned int crtc_h,
29 uint32_t src_x, uint32_t src_y,
30 uint32_t src_w, uint32_t src_h)
31{
32 struct sti_layer *layer = to_sti_layer(plane);
33 struct sti_mixer *mixer = to_sti_mixer(crtc);
34 int res;
35
36 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s) drm fb:%d\n",
37 crtc->base.id, sti_mixer_to_str(mixer),
38 plane->base.id, sti_layer_to_str(layer), fb->base.id);
39 DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
40
41 res = sti_mixer_set_layer_depth(mixer, layer);
42 if (res) {
43 DRM_ERROR("Can not set layer depth\n");
44 return res;
45 }
46
47 /* src_x, src_y, src_w and src_h are in 16.16 fixed-point format. */
48 res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id,
49 crtc_x, crtc_y, crtc_w, crtc_h,
50 src_x >> 16, src_y >> 16,
51 src_w >> 16, src_h >> 16);
52 if (res) {
53 DRM_ERROR("Layer prepare failed\n");
54 return res;
55 }
56
57 res = sti_layer_commit(layer);
58 if (res) {
59 DRM_ERROR("Layer commit failed\n");
60 return res;
61 }
62
63 res = sti_mixer_set_layer_status(mixer, layer, true);
64 if (res) {
65 DRM_ERROR("Can not enable layer at mixer\n");
66 return res;
67 }
68
69 return 0;
70}
71
72static int sti_drm_disable_plane(struct drm_plane *plane)
73{
74 struct sti_layer *layer;
75 struct sti_mixer *mixer;
76 int lay_res, mix_res;
77
78 if (!plane->crtc) {
79 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
80 return 0;
81 }
82 layer = to_sti_layer(plane);
83 mixer = to_sti_mixer(plane->crtc);
84
85 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
86 plane->crtc->base.id, sti_mixer_to_str(mixer),
87 plane->base.id, sti_layer_to_str(layer));
88
89 /* Disable layer at mixer level */
90 mix_res = sti_mixer_set_layer_status(mixer, layer, false);
91 if (mix_res)
92 DRM_ERROR("Can not disable layer at mixer\n");
93
94 /* Wait a while to be sure that a Vsync event is received */
95 msleep(WAIT_NEXT_VSYNC_MS);
96
97 /* Then disable layer itself */
98 lay_res = sti_layer_disable(layer);
99 if (lay_res)
100 DRM_ERROR("Layer disable failed\n");
101
102 if (lay_res || mix_res)
103 return -EINVAL;
104
105 return 0;
106}
107
108static void sti_drm_plane_destroy(struct drm_plane *plane)
109{
110 DRM_DEBUG_DRIVER("\n");
111
112 sti_drm_disable_plane(plane);
113 drm_plane_cleanup(plane);
114}
115
116static int sti_drm_plane_set_property(struct drm_plane *plane,
117 struct drm_property *property,
118 uint64_t val)
119{
120 struct drm_device *dev = plane->dev;
121 struct sti_drm_private *private = dev->dev_private;
122 struct sti_layer *layer = to_sti_layer(plane);
123
124 DRM_DEBUG_DRIVER("\n");
125
126 if (property == private->plane_zorder_property) {
127 layer->zorder = val;
128 return 0;
129 }
130
131 return -EINVAL;
132}
133
134static struct drm_plane_funcs sti_drm_plane_funcs = {
135 .update_plane = sti_drm_update_plane,
136 .disable_plane = sti_drm_disable_plane,
137 .destroy = sti_drm_plane_destroy,
138 .set_property = sti_drm_plane_set_property,
139};
140
141static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
142 uint64_t default_val)
143{
144 struct drm_device *dev = plane->dev;
145 struct sti_drm_private *private = dev->dev_private;
146 struct drm_property *prop;
147 struct sti_layer *layer = to_sti_layer(plane);
148
149 prop = private->plane_zorder_property;
150 if (!prop) {
151 prop = drm_property_create_range(dev, 0, "zpos", 0,
152 GAM_MIXER_NB_DEPTH_LEVEL - 1);
153 if (!prop)
154 return;
155
156 private->plane_zorder_property = prop;
157 }
158
159 drm_object_attach_property(&plane->base, prop, default_val);
160 layer->zorder = default_val;
161}
162
163struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
164 struct sti_layer *layer,
165 unsigned int possible_crtcs,
166 enum drm_plane_type type)
167{
168 int err, i;
169 uint64_t default_zorder = 0;
170
171 err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
172 &sti_drm_plane_funcs,
173 sti_layer_get_formats(layer),
174 sti_layer_get_nb_formats(layer), type);
175 if (err) {
176 DRM_ERROR("Failed to initialize plane\n");
177 return NULL;
178 }
179
180 for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
181 if (sti_layer_default_zorder[i] == layer->desc)
182 break;
183
184 default_zorder = i;
185
186 if (type == DRM_PLANE_TYPE_OVERLAY)
187 sti_drm_plane_attach_zorder_property(&layer->plane,
188 default_zorder);
189
190 DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
191 layer->plane.base.id,
192 sti_layer_to_str(layer), default_zorder);
193
194 return &layer->plane;
195}
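
[ Ed. sketch, not part of the patch: overlay planes get a "zpos" range
property attached above. From userspace the depth order can be changed
with the generic libdrm property API; the helper below is illustrative
and assumes nothing beyond that API. ]

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_plane_zpos(int fd, uint32_t plane_id, uint64_t zpos)
{
	drmModeObjectPropertiesPtr props;
	uint32_t i, prop_id = 0;

	props = drmModeObjectGetProperties(fd, plane_id,
					   DRM_MODE_OBJECT_PLANE);
	if (!props)
		return -1;

	/* look up the property id of "zpos" on this plane */
	for (i = 0; i < props->count_props; i++) {
		drmModePropertyPtr p =
			drmModeGetProperty(fd, props->props[i]);

		if (p && !strcmp(p->name, "zpos"))
			prop_id = p->prop_id;
		drmModeFreeProperty(p);
	}
	drmModeFreeObjectProperties(props);

	if (!prop_id)
		return -1;	/* plane does not expose "zpos" */

	return drmModeObjectSetProperty(fd, plane_id,
					DRM_MODE_OBJECT_PLANE,
					prop_id, zpos);
}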
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
new file mode 100644
index 000000000000..4f191839f2a7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_plane.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_DRM_PLANE_H_
8#define _STI_DRM_PLANE_H_
9
10#include <drm/drmP.h>
11
12struct sti_layer;
13
14struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
15 struct sti_layer *layer,
16 unsigned int possible_crtcs,
17 enum drm_plane_type type);
18#endif
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
new file mode 100644
index 000000000000..4e30b74559f5
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -0,0 +1,549 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <linux/clk.h>
10#include <linux/dma-mapping.h>
11
12#include "sti_compositor.h"
13#include "sti_gdp.h"
14#include "sti_layer.h"
15#include "sti_vtg.h"
16
17#define ENA_COLOR_FILL BIT(8)
18#define WAIT_NEXT_VSYNC BIT(31)
19
20/* GDP color formats */
21#define GDP_RGB565 0x00
22#define GDP_RGB888 0x01
23#define GDP_RGB888_32 0x02
24#define GDP_ARGB8565 0x04
25#define GDP_ARGB8888 0x05
26#define GDP_ARGB1555 0x06
27#define GDP_ARGB4444 0x07
28#define GDP_CLUT8 0x0B
29#define GDP_YCBR888 0x10
30#define GDP_YCBR422R 0x12
31#define GDP_AYCBR8888 0x15
32
33#define GAM_GDP_CTL_OFFSET 0x00
34#define GAM_GDP_AGC_OFFSET 0x04
35#define GAM_GDP_VPO_OFFSET 0x0C
36#define GAM_GDP_VPS_OFFSET 0x10
37#define GAM_GDP_PML_OFFSET 0x14
38#define GAM_GDP_PMP_OFFSET 0x18
39#define GAM_GDP_SIZE_OFFSET 0x1C
40#define GAM_GDP_NVN_OFFSET 0x24
41#define GAM_GDP_KEY1_OFFSET 0x28
42#define GAM_GDP_KEY2_OFFSET 0x2C
43#define GAM_GDP_PPT_OFFSET 0x34
44#define GAM_GDP_CML_OFFSET 0x3C
45#define GAM_GDP_MST_OFFSET 0x68
46
47#define GAM_GDP_ALPHARANGE_255 BIT(5)
48#define GAM_GDP_AGC_FULL_RANGE 0x00808080
49#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
50#define GAM_GDP_SIZE_MAX 0x7FF
51
52#define GDP_NODE_NB_BANK 2
53#define GDP_NODE_PER_FIELD 2
54
55struct sti_gdp_node {
56 u32 gam_gdp_ctl;
57 u32 gam_gdp_agc;
58 u32 reserved1;
59 u32 gam_gdp_vpo;
60 u32 gam_gdp_vps;
61 u32 gam_gdp_pml;
62 u32 gam_gdp_pmp;
63 u32 gam_gdp_size;
64 u32 reserved2;
65 u32 gam_gdp_nvn;
66 u32 gam_gdp_key1;
67 u32 gam_gdp_key2;
68 u32 reserved3;
69 u32 gam_gdp_ppt;
70 u32 reserved4;
71 u32 gam_gdp_cml;
72};
73
74struct sti_gdp_node_list {
75 struct sti_gdp_node *top_field;
76 struct sti_gdp_node *btm_field;
77};
78
79/**
80 * STI GDP structure
81 *
82 * @layer: layer structure
83 * @clk_pix: pixel clock for the current gdp
84 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
85 * @is_curr_top: true if the current node processed is the top field
86 * @node_list: array of node list
87 */
88struct sti_gdp {
89 struct sti_layer layer;
90 struct clk *clk_pix;
91 struct notifier_block vtg_field_nb;
92 bool is_curr_top;
93 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
94};
95
96#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
97
98static const uint32_t gdp_supported_formats[] = {
99 DRM_FORMAT_XRGB8888,
100 DRM_FORMAT_ARGB8888,
101 DRM_FORMAT_ARGB4444,
102 DRM_FORMAT_ARGB1555,
103 DRM_FORMAT_RGB565,
104 DRM_FORMAT_RGB888,
105 DRM_FORMAT_AYUV,
106 DRM_FORMAT_YUV444,
107 DRM_FORMAT_VYUY,
108 DRM_FORMAT_C8,
109};
110
111static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
112{
113 return gdp_supported_formats;
114}
115
116static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
117{
118 return ARRAY_SIZE(gdp_supported_formats);
119}
120
121static int sti_gdp_fourcc2format(int fourcc)
122{
123 switch (fourcc) {
124 case DRM_FORMAT_XRGB8888:
125 return GDP_RGB888_32;
126 case DRM_FORMAT_ARGB8888:
127 return GDP_ARGB8888;
128 case DRM_FORMAT_ARGB4444:
129 return GDP_ARGB4444;
130 case DRM_FORMAT_ARGB1555:
131 return GDP_ARGB1555;
132 case DRM_FORMAT_RGB565:
133 return GDP_RGB565;
134 case DRM_FORMAT_RGB888:
135 return GDP_RGB888;
136 case DRM_FORMAT_AYUV:
137 return GDP_AYCBR8888;
138 case DRM_FORMAT_YUV444:
139 return GDP_YCBR888;
140 case DRM_FORMAT_VYUY:
141 return GDP_YCBR422R;
142 case DRM_FORMAT_C8:
143 return GDP_CLUT8;
144 }
145 return -1;
146}
147
148static int sti_gdp_get_alpharange(int format)
149{
150 switch (format) {
151 case GDP_ARGB8565:
152 case GDP_ARGB8888:
153 case GDP_AYCBR8888:
154 return GAM_GDP_ALPHARANGE_255;
155 }
156 return 0;
157}
158
159/**
160 * sti_gdp_get_free_nodes
161 * @layer: gdp layer
162 *
163 * Look for a GDP node list that is not currently read by the HW.
164 *
165 * RETURNS:
166 * Pointer to the free GDP node list
167 */
168static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
169{
170 int hw_nvn;
171 void *virt_nvn;
172 struct sti_gdp *gdp = to_sti_gdp(layer);
173 unsigned int i;
174
175 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
176 if (!hw_nvn)
177 goto end;
178
179 virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
180
181 for (i = 0; i < GDP_NODE_NB_BANK; i++)
182 if ((virt_nvn != gdp->node_list[i].btm_field) &&
183 (virt_nvn != gdp->node_list[i].top_field))
184 return &gdp->node_list[i];
185
186 /* in hazardous cases, restart with the first node */
187 DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
188 sti_layer_to_str(layer), hw_nvn);
189
190end:
191 return &gdp->node_list[0];
192}
193
194/**
195 * sti_gdp_get_current_nodes
196 * @layer: GDP layer
197 *
198 * Look for GDP nodes that are currently read by the HW.
199 *
200 * RETURNS:
201 * Pointer to the current GDP node list
202 */
203static
204struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
205{
206 int hw_nvn;
207 void *virt_nvn;
208 struct sti_gdp *gdp = to_sti_gdp(layer);
209 unsigned int i;
210
211 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
212 if (!hw_nvn)
213 goto end;
214
215 virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
216
217 for (i = 0; i < GDP_NODE_NB_BANK; i++)
218 if ((virt_nvn == gdp->node_list[i].btm_field) ||
219 (virt_nvn == gdp->node_list[i].top_field))
220 return &gdp->node_list[i];
221
222end:
223 DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
224 hw_nvn, sti_layer_to_str(layer));
225
226 return NULL;
227}
228
229/**
230 * sti_gdp_prepare_layer
231 * @layer: gdp layer
232 * @first_prepare: true if it is the first time this function is called
233 *
234 * Update the free GDP node list according to the layer properties.
235 *
236 * RETURNS:
237 * 0 on success.
238 */
239static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
240{
241 struct sti_gdp_node_list *list;
242 struct sti_gdp_node *top_field, *btm_field;
243 struct drm_display_mode *mode = layer->mode;
244 struct device *dev = layer->dev;
245 struct sti_gdp *gdp = to_sti_gdp(layer);
246 struct sti_compositor *compo = dev_get_drvdata(dev);
247 int format;
248 unsigned int depth, bpp;
249 int rate = mode->clock * 1000;
250 int res;
251 u32 ydo, xdo, yds, xds;
252
253 list = sti_gdp_get_free_nodes(layer);
254 top_field = list->top_field;
255 btm_field = list->btm_field;
256
257 dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
258 sti_layer_to_str(layer), top_field, btm_field);
259
260 /* Build the top field from layer params */
261 top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
262 top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
263 format = sti_gdp_fourcc2format(layer->format);
264 if (format == -1) {
265 DRM_ERROR("Format not supported by GDP %.4s\n",
266 (char *)&layer->format);
267 return 1;
268 }
269 top_field->gam_gdp_ctl |= format;
270 top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
271 top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
272
273 /* pixel memory location */
274 drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
275 top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
276 top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
277 top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
278
279 /* input parameters */
280 top_field->gam_gdp_pmp = layer->pitches[0];
281 top_field->gam_gdp_size =
282 clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
283 clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
284
285 /* output parameters */
286 ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
287 yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
288 xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
289 xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
290 top_field->gam_gdp_vpo = (ydo << 16) | xdo;
291 top_field->gam_gdp_vps = (yds << 16) | xds;
292
293 /* Same content and chained together */
294 memcpy(btm_field, top_field, sizeof(*btm_field));
295 top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
296 btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);
297
298 /* Interlaced mode */
299 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
300 btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
301 layer->pitches[0];
302
303 if (first_prepare) {
304 /* Register gdp callback */
305 if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
306 compo->vtg_main : compo->vtg_aux,
307 &gdp->vtg_field_nb, layer->mixer_id)) {
308 DRM_ERROR("Cannot register VTG notifier\n");
309 return 1;
310 }
311
312 /* Set and enable gdp clock */
313 if (gdp->clk_pix) {
314 res = clk_set_rate(gdp->clk_pix, rate);
315 if (res < 0) {
316 DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
317 rate);
318 return 1;
319 }
320
321 if (clk_prepare_enable(gdp->clk_pix)) {
322 DRM_ERROR("Failed to prepare/enable gdp\n");
323 return 1;
324 }
325 }
326 }
327
328 return 0;
329}
330
331/**
332 * sti_gdp_commit_layer
333 * @layer: gdp layer
334 *
335 * Update the NVN field of the 'right' field of the current GDP node (being
336 * used by the HW) with the address of the updated ('free') top field GDP node.
337 * - In interlaced mode the 'right' field is the bottom field as we update
338 * frames starting from their top field
339 * - In progressive mode, we update both bottom and top fields which are
340 * equal nodes.
341 * At the next VSYNC, the updated node list will be used by the HW.
342 *
343 * RETURNS:
344 * 0 on success.
345 */
346static int sti_gdp_commit_layer(struct sti_layer *layer)
347{
348 struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
349 struct sti_gdp_node *updated_top_node = updated_list->top_field;
350 struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
351 struct sti_gdp *gdp = to_sti_gdp(layer);
352 u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node);
353 u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node);
354 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
355
356 dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
357 sti_layer_to_str(layer),
358 updated_top_node, updated_btm_node);
359 dev_dbg(layer->dev, "Current NVN:0x%X\n",
360 readl(layer->regs + GAM_GDP_NVN_OFFSET));
361 dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
362 (unsigned long)layer->paddr,
363 readl(layer->regs + GAM_GDP_PML_OFFSET));
364
365 if (curr_list == NULL) {
366 /* On a first update, or with an invalid node, write directly
367 * to the HW register */
368 DRM_DEBUG_DRIVER("%s first update (or invalid node)",
369 sti_layer_to_str(layer));
370
371 writel(gdp->is_curr_top ?
372 dma_updated_btm : dma_updated_top,
373 layer->regs + GAM_GDP_NVN_OFFSET);
374 return 0;
375 }
376
377 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
378 if (gdp->is_curr_top) {
379 /* Do not update in the middle of the frame, but
380 * postpone the update until the bottom field has
381 * been displayed */
382 curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
383 } else {
384 /* Direct update to avoid one frame delay */
385 writel(dma_updated_top,
386 layer->regs + GAM_GDP_NVN_OFFSET);
387 }
388 } else {
389 /* Direct update for progressive to avoid one frame delay */
390 writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
391 }
392
393 return 0;
394}
395
396/**
397 * sti_gdp_disable_layer
398 * @layer: gdp layer
399 *
400 * Disable a GDP.
401 *
402 * RETURNS:
403 * 0 on success.
404 */
405static int sti_gdp_disable_layer(struct sti_layer *layer)
406{
407 unsigned int i;
408 struct sti_gdp *gdp = to_sti_gdp(layer);
409 struct sti_compositor *compo = dev_get_drvdata(layer->dev);
410
411 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
412
413 /* Set the nodes as 'to be ignored on mixer' */
414 for (i = 0; i < GDP_NODE_NB_BANK; i++) {
415 gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
416 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
417 }
418
419 if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
420 compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
421 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
422
423 if (gdp->clk_pix)
424 clk_disable_unprepare(gdp->clk_pix);
425
426 return 0;
427}
428
429/**
430 * sti_gdp_field_cb
431 * @nb: notifier block
432 * @event: event message
433 * @data: private data
434 *
435 * Handle VTG top field and bottom field event.
436 *
437 * RETURNS:
438 * 0 on success.
439 */
440int sti_gdp_field_cb(struct notifier_block *nb,
441 unsigned long event, void *data)
442{
443 struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
444
445 switch (event) {
446 case VTG_TOP_FIELD_EVENT:
447 gdp->is_curr_top = true;
448 break;
449 case VTG_BOTTOM_FIELD_EVENT:
450 gdp->is_curr_top = false;
451 break;
452 default:
453 DRM_ERROR("unsupported event: %lu\n", event);
454 break;
455 }
456
457 return 0;
458}
459
460static void sti_gdp_init(struct sti_layer *layer)
461{
462 struct sti_gdp *gdp = to_sti_gdp(layer);
463 struct device_node *np = layer->dev->of_node;
464 dma_addr_t dma;
465 void *base;
466 unsigned int i, size;
467
468 /* Allocate all the nodes within a single memory page */
469 size = sizeof(struct sti_gdp_node) *
470 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
471
472 base = dma_alloc_writecombine(layer->dev,
473 size, &dma, GFP_KERNEL | GFP_DMA);
474 if (!base) {
475 DRM_ERROR("Failed to allocate memory for GDP node\n");
476 return;
477 }
478 memset(base, 0, size);
479
480 for (i = 0; i < GDP_NODE_NB_BANK; i++) {
481 if (virt_to_dma(layer->dev, base) & 0xF) {
482 DRM_ERROR("Mem alignment failed\n");
483 return;
484 }
485 gdp->node_list[i].top_field = base;
486 DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
487 base += sizeof(struct sti_gdp_node);
488
489 if (virt_to_dma(layer->dev, base) & 0xF) {
490 DRM_ERROR("Mem alignment failed\n");
491 return;
492 }
493 gdp->node_list[i].btm_field = base;
494 DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
495 base += sizeof(struct sti_gdp_node);
496 }
497
498 if (of_device_is_compatible(np, "st,stih407-compositor")) {
499 /* Each GDP of the STiH407 chip has its own pixel clock */
500 char *clk_name;
501
502 switch (layer->desc) {
503 case STI_GDP_0:
504 clk_name = "pix_gdp1";
505 break;
506 case STI_GDP_1:
507 clk_name = "pix_gdp2";
508 break;
509 case STI_GDP_2:
510 clk_name = "pix_gdp3";
511 break;
512 case STI_GDP_3:
513 clk_name = "pix_gdp4";
514 break;
515 default:
516 DRM_ERROR("GDP id not recognized\n");
517 return;
518 }
519
520 gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
521 if (IS_ERR(gdp->clk_pix)) {
522 DRM_ERROR("Cannot get %s clock\n", clk_name);
523 gdp->clk_pix = NULL;
524 }
523 }
524}
525
526static const struct sti_layer_funcs gdp_ops = {
527 .get_formats = sti_gdp_get_formats,
528 .get_nb_formats = sti_gdp_get_nb_formats,
529 .init = sti_gdp_init,
530 .prepare = sti_gdp_prepare_layer,
531 .commit = sti_gdp_commit_layer,
532 .disable = sti_gdp_disable_layer,
533};
534
535struct sti_layer *sti_gdp_create(struct device *dev, int id)
536{
537 struct sti_gdp *gdp;
538
539 gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
540 if (!gdp) {
541 DRM_ERROR("Failed to allocate memory for GDP\n");
542 return NULL;
543 }
544
545 gdp->layer.ops = &gdp_ops;
546 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
547
548 return (struct sti_layer *)gdp;
549}
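
[ Ed. sketch, not part of the patch: the prepare/commit pair above
ping-pongs between the two node banks. Software rebuilds the bank the
HW is not reading, then redirects the in-flight node's NVN pointer so
the switch happens on the next VSYNC. A standalone plain-C model of the
progressive case: ]

#include <stdio.h>

struct model_node {
	struct model_node *nvn;	/* "next viewport node", as in the HW */
	int frame;
};

int main(void)
{
	struct model_node bank[2];
	struct model_node *hw_node = &bank[0];	/* node the HW reads */
	int frame;

	/* progressive: each bank is a single self-chained node */
	bank[0] = (struct model_node){ .nvn = &bank[0], .frame = 0 };
	bank[1] = (struct model_node){ .nvn = &bank[1], .frame = 0 };

	for (frame = 1; frame <= 3; frame++) {
		/* the free bank is the one the HW is not reading */
		struct model_node *free_node =
			hw_node == &bank[0] ? &bank[1] : &bank[0];

		/* prepare: rebuild the free bank, self-chained again */
		free_node->frame = frame;
		free_node->nvn = free_node;

		/* commit: chain the in-flight node to the new bank */
		hw_node->nvn = free_node;

		/* VSYNC: the HW follows NVN into the updated bank */
		hw_node = hw_node->nvn;
		printf("scanout of frame %d\n", hw_node->frame);
	}
	return 0;
}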
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
new file mode 100644
index 000000000000..1dab68274ad3
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef _STI_GDP_H_
10#define _STI_GDP_H_
11
12#include <linux/types.h>
13
14struct sti_layer *sti_gdp_create(struct device *dev, int id);
15
16#endif
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
new file mode 100644
index 000000000000..72d957f81c05
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -0,0 +1,794 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include <linux/clk.h>
8#include <linux/component.h>
9#include <linux/module.h>
10#include <linux/platform_device.h>
11
12#include <drm/drmP.h>
13#include <drm/drm_crtc_helper.h>
14
15/* HDformatter registers */
16#define HDA_ANA_CFG 0x0000
17#define HDA_ANA_SCALE_CTRL_Y 0x0004
18#define HDA_ANA_SCALE_CTRL_CB 0x0008
19#define HDA_ANA_SCALE_CTRL_CR 0x000C
20#define HDA_ANA_ANC_CTRL 0x0010
21#define HDA_ANA_SRC_Y_CFG 0x0014
22#define HDA_COEFF_Y_PH1_TAP123 0x0018
23#define HDA_COEFF_Y_PH1_TAP456 0x001C
24#define HDA_COEFF_Y_PH2_TAP123 0x0020
25#define HDA_COEFF_Y_PH2_TAP456 0x0024
26#define HDA_COEFF_Y_PH3_TAP123 0x0028
27#define HDA_COEFF_Y_PH3_TAP456 0x002C
28#define HDA_COEFF_Y_PH4_TAP123 0x0030
29#define HDA_COEFF_Y_PH4_TAP456 0x0034
30#define HDA_ANA_SRC_C_CFG 0x0040
31#define HDA_COEFF_C_PH1_TAP123 0x0044
32#define HDA_COEFF_C_PH1_TAP456 0x0048
33#define HDA_COEFF_C_PH2_TAP123 0x004C
34#define HDA_COEFF_C_PH2_TAP456 0x0050
35#define HDA_COEFF_C_PH3_TAP123 0x0054
36#define HDA_COEFF_C_PH3_TAP456 0x0058
37#define HDA_COEFF_C_PH4_TAP123 0x005C
38#define HDA_COEFF_C_PH4_TAP456 0x0060
39#define HDA_SYNC_AWGI 0x0300
40
41/* HDA_ANA_CFG */
42#define CFG_AWG_ASYNC_EN BIT(0)
43#define CFG_AWG_ASYNC_HSYNC_MTD BIT(1)
44#define CFG_AWG_ASYNC_VSYNC_MTD BIT(2)
45#define CFG_AWG_SYNC_DEL BIT(3)
46#define CFG_AWG_FLTR_MODE_SHIFT 4
47#define CFG_AWG_FLTR_MODE_MASK (0xF << CFG_AWG_FLTR_MODE_SHIFT)
48#define CFG_AWG_FLTR_MODE_SD (0 << CFG_AWG_FLTR_MODE_SHIFT)
49#define CFG_AWG_FLTR_MODE_ED (1 << CFG_AWG_FLTR_MODE_SHIFT)
50#define CFG_AWG_FLTR_MODE_HD (2 << CFG_AWG_FLTR_MODE_SHIFT)
51#define CFG_SYNC_ON_PBPR_MASK BIT(8)
52#define CFG_PREFILTER_EN_MASK BIT(9)
53#define CFG_PBPR_SYNC_OFF_SHIFT 16
54#define CFG_PBPR_SYNC_OFF_MASK (0x7FF << CFG_PBPR_SYNC_OFF_SHIFT)
55#define CFG_PBPR_SYNC_OFF_VAL 0x117 /* Voltage dependent. stiH416 */
56
57/* Default scaling values */
58#define SCALE_CTRL_Y_DFLT 0x00C50256
59#define SCALE_CTRL_CB_DFLT 0x00DB0249
60#define SCALE_CTRL_CR_DFLT 0x00DB0249
61
62/* Video DACs control */
63#define VIDEO_DACS_CONTROL_MASK 0x0FFF
64#define VIDEO_DACS_CONTROL_SYSCFG2535 0x085C /* for stih416 */
65#define DAC_CFG_HD_OFF_SHIFT 5
66#define DAC_CFG_HD_OFF_MASK (0x7 << DAC_CFG_HD_OFF_SHIFT)
67#define VIDEO_DACS_CONTROL_SYSCFG5072 0x0120 /* for stih407 */
68#define DAC_CFG_HD_HZUVW_OFF_MASK BIT(1)
69
70
71/* Upsampler values for the alternative 2X Filter */
72#define SAMPLER_COEF_NB 8
73#define HDA_ANA_SRC_Y_CFG_ALT_2X 0x01130000
74static u32 coef_y_alt_2x[] = {
75 0x00FE83FB, 0x1F900401, 0x00000000, 0x00000000,
76 0x00F408F9, 0x055F7C25, 0x00000000, 0x00000000
77};
78
79#define HDA_ANA_SRC_C_CFG_ALT_2X 0x01750004
80static u32 coef_c_alt_2x[] = {
81 0x001305F7, 0x05274BD0, 0x00000000, 0x00000000,
82 0x0004907C, 0x09C80B9D, 0x00000000, 0x00000000
83};
84
85/* Upsampler values for the 4X Filter */
86#define HDA_ANA_SRC_Y_CFG_4X 0x01ED0005
87#define HDA_ANA_SRC_C_CFG_4X 0x01ED0004
88static u32 coef_yc_4x[] = {
89 0x00FC827F, 0x008FE20B, 0x00F684FC, 0x050F7C24,
90 0x00F4857C, 0x0A1F402E, 0x00FA027F, 0x0E076E1D
91};
92
93/* AWG instructions for some video modes */
94#define AWG_MAX_INST 64
95
96/* 720p@50 */
97static u32 AWGi_720p_50[] = {
98 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
99 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
100 0x00000D8E, 0x00000104, 0x00001804, 0x00000971,
101 0x00000C26, 0x0000003B, 0x00000FB4, 0x00000FB5,
102 0x00000104, 0x00001AE8
103};
104
105#define NN_720p_50 ARRAY_SIZE(AWGi_720p_50)
106
107/* 720p@60 */
108static u32 AWGi_720p_60[] = {
109 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
110 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
111 0x00000C44, 0x00000104, 0x00001804, 0x00000971,
112 0x00000C26, 0x0000003B, 0x00000F0F, 0x00000F10,
113 0x00000104, 0x00001AE8
114};
115
116#define NN_720p_60 ARRAY_SIZE(AWGi_720p_60)
117
118/* 1080p@30 */
119static u32 AWGi_1080p_30[] = {
120 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
121 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
122 0x00000C2A, 0x00000104, 0x00001804, 0x00000971,
123 0x00000C2A, 0x0000003B, 0x00000EBE, 0x00000EBF,
124 0x00000EBF, 0x00000104, 0x00001A2F, 0x00001C4B,
125 0x00001C52
126};
127
128#define NN_1080p_30 ARRAY_SIZE(AWGi_1080p_30)
129
130/* 1080p@25 */
131static u32 AWGi_1080p_25[] = {
132 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
133 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
134 0x00000DE2, 0x00000104, 0x00001804, 0x00000971,
135 0x00000C2A, 0x0000003B, 0x00000F51, 0x00000F51,
136 0x00000F52, 0x00000104, 0x00001A2F, 0x00001C4B,
137 0x00001C52
138};
139
140#define NN_1080p_25 ARRAY_SIZE(AWGi_1080p_25)
141
142/* 1080p@24 */
143static u32 AWGi_1080p_24[] = {
144 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
145 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
146 0x00000E50, 0x00000104, 0x00001804, 0x00000971,
147 0x00000C2A, 0x0000003B, 0x00000F76, 0x00000F76,
148 0x00000F76, 0x00000104, 0x00001A2F, 0x00001C4B,
149 0x00001C52
150};
151
152#define NN_1080p_24 ARRAY_SIZE(AWGi_1080p_24)
153
154/* 720x480p@60 */
155static u32 AWGi_720x480p_60[] = {
156 0x00000904, 0x00000F18, 0x0000013B, 0x00001805,
157 0x00000904, 0x00000C3D, 0x0000003B, 0x00001A06
158};
159
160#define NN_720x480p_60 ARRAY_SIZE(AWGi_720x480p_60)
161
162/* Video mode category */
163enum sti_hda_vid_cat {
164 VID_SD,
165 VID_ED,
166 VID_HD_74M,
167 VID_HD_148M
168};
169
170struct sti_hda_video_config {
171 struct drm_display_mode mode;
172 u32 *awg_instr;
173 int nb_instr;
174 enum sti_hda_vid_cat vid_cat;
175};
176
177/* HD analog supported modes
178 * Interlaced modes may be added when supported by the whole display chain
179 */
180static const struct sti_hda_video_config hda_supported_modes[] = {
181 /* 1080p30 74.250Mhz */
182 {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
183 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
184 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
185 AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
186 /* 1080p30 74.176Mhz */
187 {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2008,
188 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
189 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
190 AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
191 /* 1080p24 74.250Mhz */
192 {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
193 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
194 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
195 AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
196 /* 1080p24 74.176Mhz */
197 {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2558,
198 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
199 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
200 AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
201 /* 1080p25 74.250Mhz */
202 {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
203 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
204 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
205 AWGi_1080p_25, NN_1080p_25, VID_HD_74M},
206 /* 720p60 74.250Mhz */
207 {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
208 1430, 1650, 0, 720, 725, 730, 750, 0,
209 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
210 AWGi_720p_60, NN_720p_60, VID_HD_74M},
211 /* 720p60 74.176Mhz */
212 {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74176, 1280, 1390,
213 1430, 1650, 0, 720, 725, 730, 750, 0,
214 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
215 AWGi_720p_60, NN_720p_60, VID_HD_74M},
216 /* 720p50 74.250Mhz */
217 {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
218 1760, 1980, 0, 720, 725, 730, 750, 0,
219 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
220 AWGi_720p_50, NN_720p_50, VID_HD_74M},
221 /* 720x480p60 27.027Mhz */
222 {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27027, 720, 736,
223 798, 858, 0, 480, 489, 495, 525, 0,
224 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
225 AWGi_720x480p_60, NN_720x480p_60, VID_ED},
226 /* 720x480p60 27.000Mhz */
227 {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
228 798, 858, 0, 480, 489, 495, 525, 0,
229 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
230 AWGi_720x480p_60, NN_720x480p_60, VID_ED}
231};
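
/* [ Ed. sketch, not part of the patch: each video category above
 * implies an upsampling factor that fixes the HD-DAC clock, exactly as
 * sti_hda_set_mode() computes further down: the alternative 2x filter
 * for the 74.25/74.176 MHz HD modes (DAC around 148.5 MHz), the 4x
 * filter for ED (DAC at 108 MHz). A hypothetical helper spelling that
 * out: ]
 */
static int example_hddac_rate(int mode_clock_khz, enum sti_hda_vid_cat cat)
{
	switch (cat) {
	case VID_HD_74M:
		return mode_clock_khz * 1000 * 2; /* 74250 kHz -> 148500000 Hz */
	case VID_ED:
		return mode_clock_khz * 1000 * 4; /* 27000 kHz -> 108000000 Hz */
	default:
		return -EINVAL; /* SD and 148 MHz HD are not handled here */
	}
}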
232
233/**
234 * STI hd analog structure
235 *
236 * @dev: driver device
237 * @drm_dev: pointer to drm device
238 * @mode: current display mode selected
239 * @regs: HD analog register
240 * @video_dacs_ctrl: video DACS control register
241 * @clk_pix: pixel clock of the HD analog path
242 * @clk_hddac: clock of the HD analog DAC
243 * @enabled: true if the HD analog is enabled, false otherwise
242 */
243struct sti_hda {
244 struct device dev;
245 struct drm_device *drm_dev;
246 struct drm_display_mode mode;
247 void __iomem *regs;
248 void __iomem *video_dacs_ctrl;
249 struct clk *clk_pix;
250 struct clk *clk_hddac;
251 bool enabled;
252};
253
254struct sti_hda_connector {
255 struct drm_connector drm_connector;
256 struct drm_encoder *encoder;
257 struct sti_hda *hda;
258};
259
260#define to_sti_hda_connector(x) \
261 container_of(x, struct sti_hda_connector, drm_connector)
262
263static u32 hda_read(struct sti_hda *hda, int offset)
264{
265 return readl(hda->regs + offset);
266}
267
268static void hda_write(struct sti_hda *hda, u32 val, int offset)
269{
270 writel(val, hda->regs + offset);
271}
272
273/**
274 * Search for a video mode in the supported modes table
275 *
276 * @mode: mode being searched
277 * @idx: index of the found mode
278 *
279 * Return true if mode is found
280 */
281static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
282{
283 unsigned int i;
284
285 for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++)
286 if (drm_mode_equal(&hda_supported_modes[i].mode, &mode)) {
287 *idx = i;
288 return true;
289 }
290 return false;
291}
292
293/**
294 * Enable the HD DACS
295 *
296 * @hda: pointer to HD analog structure
297 * @enable: true if HD DACS need to be enabled, else false
298 */
299static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
300{
301 u32 mask;
302
303 if (hda->video_dacs_ctrl) {
304 u32 val;
305
306 switch ((u32)hda->video_dacs_ctrl & VIDEO_DACS_CONTROL_MASK) {
307 case VIDEO_DACS_CONTROL_SYSCFG2535:
308 mask = DAC_CFG_HD_OFF_MASK;
309 break;
310 case VIDEO_DACS_CONTROL_SYSCFG5072:
311 mask = DAC_CFG_HD_HZUVW_OFF_MASK;
312 break;
313 default:
314 DRM_INFO("Video DACS control register not supported!\n");
315 return;
316 }
317
318 val = readl(hda->video_dacs_ctrl);
319 if (enable)
320 val &= ~mask;
321 else
322 val |= mask;
323
324 writel(val, hda->video_dacs_ctrl);
325 }
326}
327
328/**
329 * Configure AWG, writing instructions
330 *
331 * @hda: pointer to HD analog structure
332 * @awg_instr: pointer to AWG instructions table
333 * @nb: nb of AWG instructions
334 */
335static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb)
336{
337 unsigned int i;
338
339 DRM_DEBUG_DRIVER("\n");
340
341 for (i = 0; i < nb; i++)
342 hda_write(hda, awg_instr[i], HDA_SYNC_AWGI + i * 4);
343 for (i = nb; i < AWG_MAX_INST; i++)
344 hda_write(hda, 0, HDA_SYNC_AWGI + i * 4);
345}
346
347static void sti_hda_disable(struct drm_bridge *bridge)
348{
349 struct sti_hda *hda = bridge->driver_private;
350 u32 val;
351
352 if (!hda->enabled)
353 return;
354
355 DRM_DEBUG_DRIVER("\n");
356
357 /* Disable HD DAC and AWG */
358 val = hda_read(hda, HDA_ANA_CFG);
359 val &= ~CFG_AWG_ASYNC_EN;
360 hda_write(hda, val, HDA_ANA_CFG);
361 hda_write(hda, 0, HDA_ANA_ANC_CTRL);
362
363 hda_enable_hd_dacs(hda, false);
364
365 /* Disable/unprepare hda clock */
366 clk_disable_unprepare(hda->clk_hddac);
367 clk_disable_unprepare(hda->clk_pix);
368
369 hda->enabled = false;
370}
371
372static void sti_hda_pre_enable(struct drm_bridge *bridge)
373{
374 struct sti_hda *hda = bridge->driver_private;
375 u32 val, i, mode_idx;
376 u32 src_filter_y, src_filter_c;
377 u32 *coef_y, *coef_c;
378 u32 filter_mode;
379
380 DRM_DEBUG_DRIVER("\n");
381
382 if (hda->enabled)
383 return;
384
385 /* Prepare/enable clocks */
386 if (clk_prepare_enable(hda->clk_pix))
387 DRM_ERROR("Failed to prepare/enable hda_pix clk\n");
388 if (clk_prepare_enable(hda->clk_hddac))
389 DRM_ERROR("Failed to prepare/enable hda_hddac clk\n");
390
391 if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
392 DRM_ERROR("Undefined mode\n");
393 return;
394 }
395
396 switch (hda_supported_modes[mode_idx].vid_cat) {
397 case VID_HD_148M:
398 DRM_ERROR("Beyond HD analog capabilities\n");
399 return;
400 case VID_HD_74M:
401 /* HD uses the alternate 2x filter */
402 filter_mode = CFG_AWG_FLTR_MODE_HD;
403 src_filter_y = HDA_ANA_SRC_Y_CFG_ALT_2X;
404 src_filter_c = HDA_ANA_SRC_C_CFG_ALT_2X;
405 coef_y = coef_y_alt_2x;
406 coef_c = coef_c_alt_2x;
407 break;
408 case VID_ED:
409 /* ED uses 4x filter */
410 filter_mode = CFG_AWG_FLTR_MODE_ED;
411 src_filter_y = HDA_ANA_SRC_Y_CFG_4X;
412 src_filter_c = HDA_ANA_SRC_C_CFG_4X;
413 coef_y = coef_yc_4x;
414 coef_c = coef_yc_4x;
415 break;
416 case VID_SD:
417 DRM_ERROR("Not supported\n");
418 return;
419 default:
420 DRM_ERROR("Undefined resolution\n");
421 return;
422 }
423 DRM_DEBUG_DRIVER("Using HDA mode #%d\n", mode_idx);
424
425 /* Enable HD Video DACs */
426 hda_enable_hd_dacs(hda, true);
427
428 /* Configure scaler */
429 hda_write(hda, SCALE_CTRL_Y_DFLT, HDA_ANA_SCALE_CTRL_Y);
430 hda_write(hda, SCALE_CTRL_CB_DFLT, HDA_ANA_SCALE_CTRL_CB);
431 hda_write(hda, SCALE_CTRL_CR_DFLT, HDA_ANA_SCALE_CTRL_CR);
432
433 /* Configure sampler */
434 hda_write(hda, src_filter_y, HDA_ANA_SRC_Y_CFG);
435 hda_write(hda, src_filter_c, HDA_ANA_SRC_C_CFG);
436 for (i = 0; i < SAMPLER_COEF_NB; i++) {
437 hda_write(hda, coef_y[i], HDA_COEFF_Y_PH1_TAP123 + i * 4);
438 hda_write(hda, coef_c[i], HDA_COEFF_C_PH1_TAP123 + i * 4);
439 }
440
441 /* Configure main HDFormatter */
442 val = 0;
443 val |= (hda->mode.flags & DRM_MODE_FLAG_INTERLACE) ?
444 0 : CFG_AWG_ASYNC_VSYNC_MTD;
445 val |= (CFG_PBPR_SYNC_OFF_VAL << CFG_PBPR_SYNC_OFF_SHIFT);
446 val |= filter_mode;
447 hda_write(hda, val, HDA_ANA_CFG);
448
449 /* Configure AWG */
450 sti_hda_configure_awg(hda, hda_supported_modes[mode_idx].awg_instr,
451 hda_supported_modes[mode_idx].nb_instr);
452
453 /* Enable AWG */
454 val = hda_read(hda, HDA_ANA_CFG);
455 val |= CFG_AWG_ASYNC_EN;
456 hda_write(hda, val, HDA_ANA_CFG);
457
458 hda->enabled = true;
459}
460
461static void sti_hda_set_mode(struct drm_bridge *bridge,
462 struct drm_display_mode *mode,
463 struct drm_display_mode *adjusted_mode)
464{
465 struct sti_hda *hda = bridge->driver_private;
466 u32 mode_idx;
467 int hddac_rate;
468 int ret;
469
470 DRM_DEBUG_DRIVER("\n");
471
472 memcpy(&hda->mode, mode, sizeof(struct drm_display_mode));
473
474 if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
475 DRM_ERROR("Undefined mode\n");
476 return;
477 }
478
479 switch (hda_supported_modes[mode_idx].vid_cat) {
480 case VID_HD_74M:
481 /* HD uses the alternate 2x filter */
482 hddac_rate = mode->clock * 1000 * 2;
483 break;
484 case VID_ED:
485 /* ED uses 4x filter */
486 hddac_rate = mode->clock * 1000 * 4;
487 break;
488 default:
489 DRM_ERROR("Undefined mode\n");
490 return;
491 }
492
493 /* HD DAC = 148.5 MHz or 108 MHz */
494 ret = clk_set_rate(hda->clk_hddac, hddac_rate);
495 if (ret < 0)
496 DRM_ERROR("Cannot set rate (%dHz) for hda_hddac clk\n",
497 hddac_rate);
498
499 /* HDformatter clock = compositor clock */
500 ret = clk_set_rate(hda->clk_pix, mode->clock * 1000);
501 if (ret < 0)
502 DRM_ERROR("Cannot set rate (%dHz) for hda_pix clk\n",
503 mode->clock * 1000);
504}
505
506static void sti_hda_bridge_nope(struct drm_bridge *bridge)
507{
508 /* do nothing */
509}
510
511static void sti_hda_bridge_destroy(struct drm_bridge *bridge)
512{
513 drm_bridge_cleanup(bridge);
514 kfree(bridge);
515}
516
517static const struct drm_bridge_funcs sti_hda_bridge_funcs = {
518 .pre_enable = sti_hda_pre_enable,
519 .enable = sti_hda_bridge_nope,
520 .disable = sti_hda_disable,
521 .post_disable = sti_hda_bridge_nope,
522 .mode_set = sti_hda_set_mode,
523 .destroy = sti_hda_bridge_destroy,
524};
525
526static int sti_hda_connector_get_modes(struct drm_connector *connector)
527{
528 unsigned int i;
529 int count = 0;
530 struct sti_hda_connector *hda_connector
531 = to_sti_hda_connector(connector);
532 struct sti_hda *hda = hda_connector->hda;
533
534 DRM_DEBUG_DRIVER("\n");
535
536 for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++) {
537 struct drm_display_mode *mode =
538 drm_mode_duplicate(hda->drm_dev,
539 &hda_supported_modes[i].mode);
540 if (!mode)
541 continue;
542 mode->vrefresh = drm_mode_vrefresh(mode);
543
544 /* the first mode is the preferred mode */
545 if (i == 0)
546 mode->type |= DRM_MODE_TYPE_PREFERRED;
547
548 drm_mode_probed_add(connector, mode);
549 count++;
550 }
551
552 drm_mode_sort(&connector->modes);
553
554 return count;
555}
556
557#define CLK_TOLERANCE_HZ 50
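/*
 * mode_valid below asks the clock framework which rate it would actually
 * deliver for the requested pixel clock and rejects the mode unless that
 * rate lands within +/- CLK_TOLERANCE_HZ of the target.
 */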
558
559static int sti_hda_connector_mode_valid(struct drm_connector *connector,
560 struct drm_display_mode *mode)
561{
562 int target = mode->clock * 1000;
563 int target_min = target - CLK_TOLERANCE_HZ;
564 int target_max = target + CLK_TOLERANCE_HZ;
565 int result;
566 int idx;
567 struct sti_hda_connector *hda_connector
568 = to_sti_hda_connector(connector);
569 struct sti_hda *hda = hda_connector->hda;
570
571 if (!hda_get_mode_idx(*mode, &idx)) {
572 return MODE_BAD;
573 } else {
574 result = clk_round_rate(hda->clk_pix, target);
575
576 DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
577 target, result);
578
579 if ((result < target_min) || (result > target_max)) {
580 DRM_DEBUG_DRIVER("hda pixclk=%d not supported\n",
581 target);
582 return MODE_BAD;
583 }
584 }
585
586 return MODE_OK;
587}
588
589struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector)
590{
591 struct sti_hda_connector *hda_connector
592 = to_sti_hda_connector(connector);
593
594 /* Best encoder is the one associated during connector creation */
595 return hda_connector->encoder;
596}
597
598static struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
599 .get_modes = sti_hda_connector_get_modes,
600 .mode_valid = sti_hda_connector_mode_valid,
601 .best_encoder = sti_hda_best_encoder,
602};
603
604static enum drm_connector_status
605sti_hda_connector_detect(struct drm_connector *connector, bool force)
606{
607 return connector_status_connected;
608}
609
610static void sti_hda_connector_destroy(struct drm_connector *connector)
611{
612 struct sti_hda_connector *hda_connector
613 = to_sti_hda_connector(connector);
614
615 drm_connector_unregister(connector);
616 drm_connector_cleanup(connector);
617 kfree(hda_connector);
618}
619
620static struct drm_connector_funcs sti_hda_connector_funcs = {
621 .dpms = drm_helper_connector_dpms,
622 .fill_modes = drm_helper_probe_single_connector_modes,
623 .detect = sti_hda_connector_detect,
624 .destroy = sti_hda_connector_destroy,
625};
626
627static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
628{
629 struct drm_encoder *encoder;
630
631 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
632 if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
633 return encoder;
634 }
635
636 return NULL;
637}
638
639static int sti_hda_bind(struct device *dev, struct device *master, void *data)
640{
641 struct sti_hda *hda = dev_get_drvdata(dev);
642 struct drm_device *drm_dev = data;
643 struct drm_encoder *encoder;
644 struct sti_hda_connector *connector;
645 struct drm_connector *drm_connector;
646 struct drm_bridge *bridge;
647 int err;
648
649 /* Set the drm device handle */
650 hda->drm_dev = drm_dev;
651
652 encoder = sti_hda_find_encoder(drm_dev);
653 if (!encoder)
654 return -ENOMEM;
655
656 connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
657 if (!connector)
658 return -ENOMEM;
659
660 connector->hda = hda;
661
662 bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
663 if (!bridge)
664 return -ENOMEM;
665
666 bridge->driver_private = hda;
667 drm_bridge_init(drm_dev, bridge, &sti_hda_bridge_funcs);
668
669 encoder->bridge = bridge;
670 connector->encoder = encoder;
671
672 drm_connector = (struct drm_connector *)connector;
673
674 drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
675
676 drm_connector_init(drm_dev, drm_connector,
677 &sti_hda_connector_funcs, DRM_MODE_CONNECTOR_Component);
678 drm_connector_helper_add(drm_connector,
679 &sti_hda_connector_helper_funcs);
680
681 err = drm_connector_register(drm_connector);
682 if (err)
683 goto err_connector;
684
685 err = drm_mode_connector_attach_encoder(drm_connector, encoder);
686 if (err) {
687 DRM_ERROR("Failed to attach a connector to a encoder\n");
688 goto err_sysfs;
689 }
690
691 return 0;
692
693err_sysfs:
694 drm_connector_unregister(drm_connector);
695err_connector:
696 drm_bridge_cleanup(bridge);
697 drm_connector_cleanup(drm_connector);
698 return -EINVAL;
699}
700
701static void sti_hda_unbind(struct device *dev,
702 struct device *master, void *data)
703{
704 /* do nothing */
705}
706
707static const struct component_ops sti_hda_ops = {
708 .bind = sti_hda_bind,
709 .unbind = sti_hda_unbind,
710};
711
712static int sti_hda_probe(struct platform_device *pdev)
713{
714 struct device *dev = &pdev->dev;
715 struct sti_hda *hda;
716 struct resource *res;
717
718 DRM_INFO("%s\n", __func__);
719
720 hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL);
721 if (!hda)
722 return -ENOMEM;
723
724 hda->dev = pdev->dev;
725
726 /* Get resources */
727 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
728 if (!res) {
729 DRM_ERROR("Invalid hda resource\n");
730 return -ENOMEM;
731 }
732 hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
733 if (!hda->regs)
734 return -ENOMEM;
735
736 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
737 "video-dacs-ctrl");
738 if (res) {
739 hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
740 resource_size(res));
741 if (!hda->video_dacs_ctrl)
742 return -ENOMEM;
743 } else {
744 /* If there is no video-dacs-ctrl resource, continue the probe */
745 DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n");
746 hda->video_dacs_ctrl = NULL;
747 }
748
749 /* Get clock resources */
750 hda->clk_pix = devm_clk_get(dev, "pix");
751 if (IS_ERR(hda->clk_pix)) {
752 DRM_ERROR("Cannot get hda_pix clock\n");
753 return PTR_ERR(hda->clk_pix);
754 }
755
756 hda->clk_hddac = devm_clk_get(dev, "hddac");
757 if (IS_ERR(hda->clk_hddac)) {
758 DRM_ERROR("Cannot get hda_hddac clock\n");
759 return PTR_ERR(hda->clk_hddac);
760 }
761
762 platform_set_drvdata(pdev, hda);
763
764 return component_add(&pdev->dev, &sti_hda_ops);
765}
766
767static int sti_hda_remove(struct platform_device *pdev)
768{
769 component_del(&pdev->dev, &sti_hda_ops);
770 return 0;
771}
772
773static struct of_device_id hda_of_match[] = {
774 { .compatible = "st,stih416-hda", },
775 { .compatible = "st,stih407-hda", },
776 { /* end node */ }
777};
778MODULE_DEVICE_TABLE(of, hda_of_match);
779
780struct platform_driver sti_hda_driver = {
781 .driver = {
782 .name = "sti-hda",
783 .owner = THIS_MODULE,
784 .of_match_table = hda_of_match,
785 },
786 .probe = sti_hda_probe,
787 .remove = sti_hda_remove,
788};
789
790module_platform_driver(sti_hda_driver);
791
792MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
793MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
794MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
new file mode 100644
index 000000000000..284e541d970d
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -0,0 +1,810 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include <linux/clk.h>
8#include <linux/component.h>
9#include <linux/hdmi.h>
10#include <linux/module.h>
11#include <linux/of_gpio.h>
12#include <linux/platform_device.h>
13#include <linux/reset.h>
14
15#include <drm/drmP.h>
16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_edid.h>
18
19#include "sti_hdmi.h"
20#include "sti_hdmi_tx3g4c28phy.h"
21#include "sti_hdmi_tx3g0c55phy.h"
22#include "sti_vtg.h"
23
24#define HDMI_CFG 0x0000
25#define HDMI_INT_EN 0x0004
26#define HDMI_INT_STA 0x0008
27#define HDMI_INT_CLR 0x000C
28#define HDMI_STA 0x0010
29#define HDMI_ACTIVE_VID_XMIN 0x0100
30#define HDMI_ACTIVE_VID_XMAX 0x0104
31#define HDMI_ACTIVE_VID_YMIN 0x0108
32#define HDMI_ACTIVE_VID_YMAX 0x010C
33#define HDMI_DFLT_CHL0_DAT 0x0110
34#define HDMI_DFLT_CHL1_DAT 0x0114
35#define HDMI_DFLT_CHL2_DAT 0x0118
36#define HDMI_SW_DI_1_HEAD_WORD 0x0210
37#define HDMI_SW_DI_1_PKT_WORD0 0x0214
38#define HDMI_SW_DI_1_PKT_WORD1 0x0218
39#define HDMI_SW_DI_1_PKT_WORD2 0x021C
40#define HDMI_SW_DI_1_PKT_WORD3 0x0220
41#define HDMI_SW_DI_1_PKT_WORD4 0x0224
42#define HDMI_SW_DI_1_PKT_WORD5 0x0228
43#define HDMI_SW_DI_1_PKT_WORD6 0x022C
44#define HDMI_SW_DI_CFG 0x0230
45
46#define HDMI_IFRAME_SLOT_AVI 1
47
48#define XCAT(prefix, x, suffix) prefix ## x ## suffix
49#define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD)
50#define HDMI_SW_DI_N_PKT_WORD0(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD0)
51#define HDMI_SW_DI_N_PKT_WORD1(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD1)
52#define HDMI_SW_DI_N_PKT_WORD2(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD2)
53#define HDMI_SW_DI_N_PKT_WORD3(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD3)
54#define HDMI_SW_DI_N_PKT_WORD4(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD4)
55#define HDMI_SW_DI_N_PKT_WORD5(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD5)
56#define HDMI_SW_DI_N_PKT_WORD6(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD6)
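/*
 * XCAT pastes tokens after its arguments are expanded, so e.g.
 * HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI) becomes
 * HDMI_SW_DI_1_HEAD_WORD, i.e. register offset 0x0210.
 */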
57
58#define HDMI_IFRAME_DISABLED 0x0
59#define HDMI_IFRAME_SINGLE_SHOT 0x1
60#define HDMI_IFRAME_FIELD 0x2
61#define HDMI_IFRAME_FRAME 0x3
62#define HDMI_IFRAME_MASK 0x3
63#define HDMI_IFRAME_CFG_DI_N(x, n) ((x) << ((n-1)*4)) /* n from 1 to 6 */
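/*
 * Each data-island slot n owns a 4-bit field in HDMI_SW_DI_CFG, so e.g.
 * HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, 1) evaluates to 0x2 and arms
 * slot 1 (the AVI slot) for once-per-field transmission.
 */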
64
65#define HDMI_CFG_DEVICE_EN BIT(0)
66#define HDMI_CFG_HDMI_NOT_DVI BIT(1)
67#define HDMI_CFG_HDCP_EN BIT(2)
68#define HDMI_CFG_ESS_NOT_OESS BIT(3)
69#define HDMI_CFG_H_SYNC_POL_NEG BIT(4)
70#define HDMI_CFG_SINK_TERM_DET_EN BIT(5)
71#define HDMI_CFG_V_SYNC_POL_NEG BIT(6)
72#define HDMI_CFG_422_EN BIT(8)
73#define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12)
74#define HDMI_CFG_FIFO_UNDERRUN_CLR BIT(13)
75#define HDMI_CFG_SW_RST_EN BIT(31)
76
77#define HDMI_INT_GLOBAL BIT(0)
78#define HDMI_INT_SW_RST BIT(1)
79#define HDMI_INT_PIX_CAP BIT(3)
80#define HDMI_INT_HOT_PLUG BIT(4)
81#define HDMI_INT_DLL_LCK BIT(5)
82#define HDMI_INT_NEW_FRAME BIT(6)
83#define HDMI_INT_GENCTRL_PKT BIT(7)
84#define HDMI_INT_SINK_TERM_PRESENT BIT(11)
85
86#define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
87 | HDMI_INT_DLL_LCK \
88 | HDMI_INT_HOT_PLUG \
89 | HDMI_INT_GLOBAL)
90
91#define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
92 | HDMI_INT_GENCTRL_PKT \
93 | HDMI_INT_NEW_FRAME \
94 | HDMI_INT_DLL_LCK \
95 | HDMI_INT_HOT_PLUG \
96 | HDMI_INT_PIX_CAP \
97 | HDMI_INT_SW_RST \
98 | HDMI_INT_GLOBAL)
99
100#define HDMI_STA_SW_RST BIT(1)
101
102struct sti_hdmi_connector {
103 struct drm_connector drm_connector;
104 struct drm_encoder *encoder;
105 struct sti_hdmi *hdmi;
106};
107
108#define to_sti_hdmi_connector(x) \
109 container_of(x, struct sti_hdmi_connector, drm_connector)
110
111u32 hdmi_read(struct sti_hdmi *hdmi, int offset)
112{
113 return readl(hdmi->regs + offset);
114}
115
116void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset)
117{
118 writel(val, hdmi->regs + offset);
119}
120
121/**
122 * Threaded HDMI interrupt handler
123 *
124 * @irq: irq number
125 * @arg: pointer to the sti_hdmi structure
126 */
127static irqreturn_t hdmi_irq_thread(int irq, void *arg)
128{
129 struct sti_hdmi *hdmi = arg;
130
131 /* Hot plug/unplug IRQ */
132 if (hdmi->irq_status & HDMI_INT_HOT_PLUG) {
133 /* read gpio to get the status */
134 hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
135 if (hdmi->drm_dev)
136 drm_helper_hpd_irq_event(hdmi->drm_dev);
137 }
138
139 /* SW reset and PLL lock are mutually exclusive, so we can use
140 * the same event to signal them
141 */
142 if (hdmi->irq_status & (HDMI_INT_SW_RST | HDMI_INT_DLL_LCK)) {
143 hdmi->event_received = true;
144 wake_up_interruptible(&hdmi->wait_event);
145 }
146
147 return IRQ_HANDLED;
148}
149
150/**
151 * HDMI interrupt handler
152 *
153 * @irq: irq number
154 * @arg: pointer to the sti_hdmi structure
155 */
156static irqreturn_t hdmi_irq(int irq, void *arg)
157{
158 struct sti_hdmi *hdmi = arg;
159
160 /* read interrupt status */
161 hdmi->irq_status = hdmi_read(hdmi, HDMI_INT_STA);
162
163 /* clear interrupt status */
164 hdmi_write(hdmi, hdmi->irq_status, HDMI_INT_CLR);
165
166 /* force sync bus write */
167 hdmi_read(hdmi, HDMI_INT_STA);
168
169 return IRQ_WAKE_THREAD;
170}
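/*
 * hdmi_irq() runs in hard-irq context: it only latches and clears the
 * status register, then returns IRQ_WAKE_THREAD so that hdmi_irq_thread()
 * can do the work that may sleep (GPIO read, hotplug notification).
 */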
171
172/**
173 * Set hdmi active area depending on the drm display mode selected
174 *
175 * @hdmi: pointer to the hdmi internal structure
176 */
177static void hdmi_active_area(struct sti_hdmi *hdmi)
178{
179 u32 xmin, xmax;
180 u32 ymin, ymax;
181
182 xmin = sti_vtg_get_pixel_number(hdmi->mode, 0);
183 xmax = sti_vtg_get_pixel_number(hdmi->mode, hdmi->mode.hdisplay - 1);
184 ymin = sti_vtg_get_line_number(hdmi->mode, 0);
185 ymax = sti_vtg_get_line_number(hdmi->mode, hdmi->mode.vdisplay - 1);
186
187 hdmi_write(hdmi, xmin, HDMI_ACTIVE_VID_XMIN);
188 hdmi_write(hdmi, xmax, HDMI_ACTIVE_VID_XMAX);
189 hdmi_write(hdmi, ymin, HDMI_ACTIVE_VID_YMIN);
190 hdmi_write(hdmi, ymax, HDMI_ACTIVE_VID_YMAX);
191}
192
193/**
194 * Overall hdmi configuration
195 *
196 * @hdmi: pointer to the hdmi internal structure
197 */
198static void hdmi_config(struct sti_hdmi *hdmi)
199{
200 u32 conf;
201
202 DRM_DEBUG_DRIVER("\n");
203
204 /* Clear overrun and underrun fifo */
205 conf = HDMI_CFG_FIFO_OVERRUN_CLR | HDMI_CFG_FIFO_UNDERRUN_CLR;
206
207 /* Enable HDMI mode not DVI */
208 conf |= HDMI_CFG_HDMI_NOT_DVI | HDMI_CFG_ESS_NOT_OESS;
209
210 /* Enable sink term detection */
211 conf |= HDMI_CFG_SINK_TERM_DET_EN;
212
213 /* Set Hsync polarity */
214 if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) {
215 DRM_DEBUG_DRIVER("H Sync Negative\n");
216 conf |= HDMI_CFG_H_SYNC_POL_NEG;
217 }
218
219 /* Set Vsync polarity */
220 if (hdmi->mode.flags & DRM_MODE_FLAG_NVSYNC) {
221 DRM_DEBUG_DRIVER("V Sync Negative\n");
222 conf |= HDMI_CFG_V_SYNC_POL_NEG;
223 }
224
225 /* Enable HDMI */
226 conf |= HDMI_CFG_DEVICE_EN;
227
228 hdmi_write(hdmi, conf, HDMI_CFG);
229}
230
231/**
232 * Prepare and configure the AVI infoframe
233 *
234 * AVI infoframes are transmitted at least once per two video fields and
235 * contain information about the HDMI transmission mode such as color space,
236 * colorimetry, ...
237 *
238 * @hdmi: pointer to the hdmi internal structure
239 *
240 * Return a negative value if an error occurs
241 */
242static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
243{
244 struct drm_display_mode *mode = &hdmi->mode;
245 struct hdmi_avi_infoframe infoframe;
246 u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
247 u8 *frame = buffer + HDMI_INFOFRAME_HEADER_SIZE;
248 u32 val;
249 int ret;
250
251 DRM_DEBUG_DRIVER("\n");
252
253 ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode);
254 if (ret < 0) {
255 DRM_ERROR("failed to setup AVI infoframe: %d\n", ret);
256 return ret;
257 }
258
259 /* fixed infoframe configuration not linked to the mode */
260 infoframe.colorspace = HDMI_COLORSPACE_RGB;
261 infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
262 infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
263
264 ret = hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer));
265 if (ret < 0) {
266 DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
267 return ret;
268 }
269
270 /* Disable transmission slot for AVI infoframe */
271 val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
272 val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, HDMI_IFRAME_SLOT_AVI);
273 hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
274
275 /* Infoframe header */
276 val = buffer[0x0];
277 val |= buffer[0x1] << 8;
278 val |= buffer[0x2] << 16;
279 hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI));
280
281 /* Infoframe packet bytes */
282 val = frame[0x0];
283 val |= frame[0x1] << 8;
284 val |= frame[0x2] << 16;
285 val |= frame[0x3] << 24;
286 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI));
287
288 val = frame[0x4];
289 val |= frame[0x5] << 8;
290 val |= frame[0x6] << 16;
291 val |= frame[0x7] << 24;
292 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI));
293
294 val = frame[0x8];
295 val |= frame[0x9] << 8;
296 val |= frame[0xA] << 16;
297 val |= frame[0xB] << 24;
298 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
299
300 val = frame[0xC];
301 val |= frame[0xD] << 8;
302 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
303
304 /* Enable transmission slot for AVI infoframe
305 * According to the hdmi specification, AVI infoframe should be
306 * transmitted at least once per two video fields
307 */
308 val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
309 val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI);
310 hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
311
312 return 0;
313}
314
315/**
316 * Software reset of the hdmi subsystem
317 *
318 * @hdmi: pointer to the hdmi internal structure
319 *
320 */
321#define HDMI_TIMEOUT_SWRESET 100 /* milliseconds */
322static void hdmi_swreset(struct sti_hdmi *hdmi)
323{
324 u32 val;
325
326 DRM_DEBUG_DRIVER("\n");
327
328 /* Enable hdmi_audio clock only during hdmi reset */
329 if (clk_prepare_enable(hdmi->clk_audio))
330 DRM_INFO("Failed to prepare/enable hdmi_audio clk\n");
331
332 /* Sw reset */
333 hdmi->event_received = false;
334
335 val = hdmi_read(hdmi, HDMI_CFG);
336 val |= HDMI_CFG_SW_RST_EN;
337 hdmi_write(hdmi, val, HDMI_CFG);
338
339 /* Wait reset completed */
340 wait_event_interruptible_timeout(hdmi->wait_event,
341 hdmi->event_received,
342 msecs_to_jiffies
343 (HDMI_TIMEOUT_SWRESET));
344
345 /*
346 * HDMI_STA_SW_RST bit is set to '1' when SW_RST bit in HDMI_CFG is
347 * set to '1' and clk_audio is running.
348 */
349 if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_SW_RST) == 0)
350 DRM_DEBUG_DRIVER("Warning: HDMI sw reset timeout occurs\n");
351
352 val = hdmi_read(hdmi, HDMI_CFG);
353 val &= ~HDMI_CFG_SW_RST_EN;
354 hdmi_write(hdmi, val, HDMI_CFG);
355
356 /* Disable hdmi_audio clock. Not used anymore for drm purpose */
357 clk_disable_unprepare(hdmi->clk_audio);
358}
359
360static void sti_hdmi_disable(struct drm_bridge *bridge)
361{
362 struct sti_hdmi *hdmi = bridge->driver_private;
363
364 u32 val = hdmi_read(hdmi, HDMI_CFG);
365
366 if (!hdmi->enabled)
367 return;
368
369 DRM_DEBUG_DRIVER("\n");
370
371 /* Disable HDMI */
372 val &= ~HDMI_CFG_DEVICE_EN;
373 hdmi_write(hdmi, val, HDMI_CFG);
374
375 hdmi_write(hdmi, 0xffffffff, HDMI_INT_CLR);
376
377 /* Stop the phy */
378 hdmi->phy_ops->stop(hdmi);
379
380 /* Set the default channel data to be a dark red */
381 hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL0_DAT);
382 hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL1_DAT);
383 hdmi_write(hdmi, 0x0060, HDMI_DFLT_CHL2_DAT);
384
385 /* Disable/unprepare hdmi clock */
386 clk_disable_unprepare(hdmi->clk_phy);
387 clk_disable_unprepare(hdmi->clk_tmds);
388 clk_disable_unprepare(hdmi->clk_pix);
389
390 hdmi->enabled = false;
391}
392
393static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
394{
395 struct sti_hdmi *hdmi = bridge->driver_private;
396
397 DRM_DEBUG_DRIVER("\n");
398
399 if (hdmi->enabled)
400 return;
401
402 /* Prepare/enable clocks */
403 if (clk_prepare_enable(hdmi->clk_pix))
404 DRM_ERROR("Failed to prepare/enable hdmi_pix clk\n");
405 if (clk_prepare_enable(hdmi->clk_tmds))
406 DRM_ERROR("Failed to prepare/enable hdmi_tmds clk\n");
407 if (clk_prepare_enable(hdmi->clk_phy))
408 DRM_ERROR("Failed to prepare/enable hdmi_rejec_pll clk\n");
409
410 hdmi->enabled = true;
411
412 /* Program hdmi serializer and start phy */
413 if (!hdmi->phy_ops->start(hdmi)) {
414 DRM_ERROR("Unable to start hdmi phy\n");
415 return;
416 }
417
418 /* Program hdmi active area */
419 hdmi_active_area(hdmi);
420
421 /* Enable working interrupts */
422 hdmi_write(hdmi, HDMI_WORKING_INT, HDMI_INT_EN);
423
424 /* Program hdmi config */
425 hdmi_config(hdmi);
426
427 /* Program AVI infoframe */
428 if (hdmi_avi_infoframe_config(hdmi))
429 DRM_ERROR("Unable to configure AVI infoframe\n");
430
431 /* Sw reset */
432 hdmi_swreset(hdmi);
433}
434
435static void sti_hdmi_set_mode(struct drm_bridge *bridge,
436 struct drm_display_mode *mode,
437 struct drm_display_mode *adjusted_mode)
438{
439 struct sti_hdmi *hdmi = bridge->driver_private;
440 int ret;
441
442 DRM_DEBUG_DRIVER("\n");
443
444 /* Copy the drm display mode in the connector local structure */
445 memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode));
446
447 /* Update clock framerate according to the selected mode */
448 ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000);
449 if (ret < 0) {
450 DRM_ERROR("Cannot set rate (%dHz) for hdmi_pix clk\n",
451 mode->clock * 1000);
452 return;
453 }
454 ret = clk_set_rate(hdmi->clk_phy, mode->clock * 1000);
455 if (ret < 0) {
456 DRM_ERROR("Cannot set rate (%dHz) for hdmi_rejection_pll clk\n",
457 mode->clock * 1000);
458 return;
459 }
460}
461
462static void sti_hdmi_bridge_nope(struct drm_bridge *bridge)
463{
464 /* do nothing */
465}
466
467static void sti_hdmi_bridge_destroy(struct drm_bridge *bridge)
468{
469 drm_bridge_cleanup(bridge);
470 kfree(bridge);
471}
472
473static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
474 .pre_enable = sti_hdmi_pre_enable,
475 .enable = sti_hdmi_bridge_nope,
476 .disable = sti_hdmi_disable,
477 .post_disable = sti_hdmi_bridge_nope,
478 .mode_set = sti_hdmi_set_mode,
479 .destroy = sti_hdmi_bridge_destroy,
480};
481
482static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
483{
484 struct i2c_adapter *i2c_adap;
485 struct edid *edid;
486 int count;
487
488 DRM_DEBUG_DRIVER("\n");
489
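	/* DDC/EDID is read through I2C adapter 1, a hard-coded bus index */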
490 i2c_adap = i2c_get_adapter(1);
491 if (!i2c_adap)
492 goto fail;
493
494 edid = drm_get_edid(connector, i2c_adap);
495 if (!edid)
496 goto fail;
497
498 count = drm_add_edid_modes(connector, edid);
499 drm_mode_connector_update_edid_property(connector, edid);
500
501 kfree(edid);
502 return count;
503
504fail:
505 DRM_ERROR("Can not read HDMI EDID\n");
506 return 0;
507}
508
509#define CLK_TOLERANCE_HZ 50
510
511static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
512 struct drm_display_mode *mode)
513{
514 int target = mode->clock * 1000;
515 int target_min = target - CLK_TOLERANCE_HZ;
516 int target_max = target + CLK_TOLERANCE_HZ;
517 int result;
518 struct sti_hdmi_connector *hdmi_connector
519 = to_sti_hdmi_connector(connector);
520 struct sti_hdmi *hdmi = hdmi_connector->hdmi;
521
523 result = clk_round_rate(hdmi->clk_pix, target);
524
525 DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
526 target, result);
527
528 if ((result < target_min) || (result > target_max)) {
529 DRM_DEBUG_DRIVER("hdmi pixclk=%d not supported\n", target);
530 return MODE_BAD;
531 }
532
533 return MODE_OK;
534}
535
536struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector)
537{
538 struct sti_hdmi_connector *hdmi_connector
539 = to_sti_hdmi_connector(connector);
540
541 /* Best encoder is the one associated during connector creation */
542 return hdmi_connector->encoder;
543}
544
545static struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
546 .get_modes = sti_hdmi_connector_get_modes,
547 .mode_valid = sti_hdmi_connector_mode_valid,
548 .best_encoder = sti_hdmi_best_encoder,
549};
550
551/* Get the detection status of the display device */
552static enum drm_connector_status
553sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
554{
555 struct sti_hdmi_connector *hdmi_connector
556 = to_sti_hdmi_connector(connector);
557 struct sti_hdmi *hdmi = hdmi_connector->hdmi;
558
559 DRM_DEBUG_DRIVER("\n");
560
561 if (hdmi->hpd) {
562 DRM_DEBUG_DRIVER("hdmi cable connected\n");
563 return connector_status_connected;
564 }
565
566 DRM_DEBUG_DRIVER("hdmi cable disconnected\n");
567 return connector_status_disconnected;
568}
569
570static void sti_hdmi_connector_destroy(struct drm_connector *connector)
571{
572 struct sti_hdmi_connector *hdmi_connector
573 = to_sti_hdmi_connector(connector);
574
575 drm_connector_unregister(connector);
576 drm_connector_cleanup(connector);
577 kfree(hdmi_connector);
578}
579
580static struct drm_connector_funcs sti_hdmi_connector_funcs = {
581 .dpms = drm_helper_connector_dpms,
582 .fill_modes = drm_helper_probe_single_connector_modes,
583 .detect = sti_hdmi_connector_detect,
584 .destroy = sti_hdmi_connector_destroy,
585};
586
587static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
588{
589 struct drm_encoder *encoder;
590
591 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
592 if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
593 return encoder;
594 }
595
596 return NULL;
597}
598
599static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
600{
601 struct sti_hdmi *hdmi = dev_get_drvdata(dev);
602 struct drm_device *drm_dev = data;
603 struct drm_encoder *encoder;
604 struct sti_hdmi_connector *connector;
605 struct drm_connector *drm_connector;
606 struct drm_bridge *bridge;
607 struct i2c_adapter *i2c_adap;
608 int err;
609
610 i2c_adap = i2c_get_adapter(1);
611 if (!i2c_adap)
612 return -EPROBE_DEFER;
613
614 /* Set the drm device handle */
615 hdmi->drm_dev = drm_dev;
616
617 encoder = sti_hdmi_find_encoder(drm_dev);
618 if (!encoder)
619 return -ENOMEM;
620
621 connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
622 if (!connector)
623 return -ENOMEM;
624
625 connector->hdmi = hdmi;
626
627 bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
628 if (!bridge)
629 return -ENOMEM;
630
631 bridge->driver_private = hdmi;
632 drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs);
633
634 encoder->bridge = bridge;
635 connector->encoder = encoder;
636
637 drm_connector = (struct drm_connector *)connector;
638
639 drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
640
641 drm_connector_init(drm_dev, drm_connector,
642 &sti_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
643 drm_connector_helper_add(drm_connector,
644 &sti_hdmi_connector_helper_funcs);
645
646 err = drm_connector_register(drm_connector);
647 if (err)
648 goto err_connector;
649
650 err = drm_mode_connector_attach_encoder(drm_connector, encoder);
651 if (err) {
652 DRM_ERROR("Failed to attach a connector to a encoder\n");
653 goto err_sysfs;
654 }
655
656 /* Enable default interrupts */
657 hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
658
659 return 0;
660
661err_sysfs:
662 drm_connector_unregister(drm_connector);
663err_connector:
664 drm_bridge_cleanup(bridge);
665 drm_connector_cleanup(drm_connector);
666 return -EINVAL;
667}
668
669static void sti_hdmi_unbind(struct device *dev,
670 struct device *master, void *data)
671{
672 /* do nothing */
673}
674
675static const struct component_ops sti_hdmi_ops = {
676 .bind = sti_hdmi_bind,
677 .unbind = sti_hdmi_unbind,
678};
679
680static struct of_device_id hdmi_of_match[] = {
681 {
682 .compatible = "st,stih416-hdmi",
683 .data = &tx3g0c55phy_ops,
684 }, {
685 .compatible = "st,stih407-hdmi",
686 .data = &tx3g4c28phy_ops,
687 }, {
688 /* end node */
689 }
690};
691MODULE_DEVICE_TABLE(of, hdmi_of_match);
692
693static int sti_hdmi_probe(struct platform_device *pdev)
694{
695 struct device *dev = &pdev->dev;
696 struct sti_hdmi *hdmi;
697 struct device_node *np = dev->of_node;
698 struct resource *res;
699 int ret;
700
701 DRM_INFO("%s\n", __func__);
702
703 hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
704 if (!hdmi)
705 return -ENOMEM;
706
707 hdmi->dev = pdev->dev;
708
709 /* Get resources */
710 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
711 if (!res) {
712 DRM_ERROR("Invalid hdmi resource\n");
713 return -ENOMEM;
714 }
715 hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
716 if (!hdmi->regs)
717 return -ENOMEM;
718
719 if (of_device_is_compatible(np, "st,stih416-hdmi")) {
720 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
721 "syscfg");
722 if (!res) {
723 DRM_ERROR("Invalid syscfg resource\n");
724 return -ENOMEM;
725 }
726 hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
727 resource_size(res));
728 if (!hdmi->syscfg)
729 return -ENOMEM;
731 }
732
733 hdmi->phy_ops = (struct hdmi_phy_ops *)
734 of_match_node(hdmi_of_match, np)->data;
735
736 /* Get clock resources */
737 hdmi->clk_pix = devm_clk_get(dev, "pix");
738 if (IS_ERR(hdmi->clk_pix)) {
739 DRM_ERROR("Cannot get hdmi_pix clock\n");
740 return PTR_ERR(hdmi->clk_pix);
741 }
742
743 hdmi->clk_tmds = devm_clk_get(dev, "tmds");
744 if (IS_ERR(hdmi->clk_tmds)) {
745 DRM_ERROR("Cannot get hdmi_tmds clock\n");
746 return PTR_ERR(hdmi->clk_tmds);
747 }
748
749 hdmi->clk_phy = devm_clk_get(dev, "phy");
750 if (IS_ERR(hdmi->clk_phy)) {
751 DRM_ERROR("Cannot get hdmi_phy clock\n");
752 return PTR_ERR(hdmi->clk_phy);
753 }
754
755 hdmi->clk_audio = devm_clk_get(dev, "audio");
756 if (IS_ERR(hdmi->clk_audio)) {
757 DRM_ERROR("Cannot get hdmi_audio clock\n");
758 return PTR_ERR(hdmi->clk_audio);
759 }
760
761 hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0);
762 if (hdmi->hpd_gpio < 0) {
763 DRM_ERROR("Failed to get hdmi hpd-gpio\n");
764 return -EIO;
765 }
766
767 hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
768
769 init_waitqueue_head(&hdmi->wait_event);
770
771 hdmi->irq = platform_get_irq_byname(pdev, "irq");
772
773 ret = devm_request_threaded_irq(dev, hdmi->irq, hdmi_irq,
774 hdmi_irq_thread, IRQF_ONESHOT, dev_name(dev), hdmi);
775 if (ret) {
776 DRM_ERROR("Failed to register HDMI interrupt\n");
777 return ret;
778 }
779
780 hdmi->reset = devm_reset_control_get(dev, "hdmi");
781 /* Take hdmi out of reset */
782 if (!IS_ERR(hdmi->reset))
783 reset_control_deassert(hdmi->reset);
784
785 platform_set_drvdata(pdev, hdmi);
786
787 return component_add(&pdev->dev, &sti_hdmi_ops);
788}
789
790static int sti_hdmi_remove(struct platform_device *pdev)
791{
792 component_del(&pdev->dev, &sti_hdmi_ops);
793 return 0;
794}
795
796struct platform_driver sti_hdmi_driver = {
797 .driver = {
798 .name = "sti-hdmi",
799 .owner = THIS_MODULE,
800 .of_match_table = hdmi_of_match,
801 },
802 .probe = sti_hdmi_probe,
803 .remove = sti_hdmi_remove,
804};
805
806module_platform_driver(sti_hdmi_driver);
807
808MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
809MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
810MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
new file mode 100644
index 000000000000..61bec6557ceb
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -0,0 +1,88 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_HDMI_H_
8#define _STI_HDMI_H_
9
10#include <linux/platform_device.h>
11
12#include <drm/drmP.h>
13
14#define HDMI_STA 0x0010
15#define HDMI_STA_DLL_LCK BIT(5)
16
17struct sti_hdmi;
18
19struct hdmi_phy_ops {
20 bool (*start)(struct sti_hdmi *hdmi);
21 void (*stop)(struct sti_hdmi *hdmi);
22};
23
24/**
25 * STI hdmi structure
26 *
27 * @dev: driver device
28 * @drm_dev: pointer to drm device
29 * @mode: current display mode selected
30 * @regs: hdmi register
31 * @syscfg: syscfg register for pll rejection configuration
32 * @clk_pix: hdmi pixel clock
33 * @clk_tmds: hdmi tmds clock
34 * @clk_phy: hdmi phy clock
35 * @clk_audio: hdmi audio clock
36 * @irq: hdmi interrupt number
37 * @irq_status: interrupt status register
38 * @phy_ops: phy start/stop operations
39 * @enabled: true if hdmi is enabled else false
40 * @hpd_gpio: hdmi hot plug detect gpio number
41 * @hpd: hot plug detect status
42 * @wait_event: wait event
43 * @event_received: wait event status
44 * @reset: reset control of the hdmi phy
45 */
46struct sti_hdmi {
47 struct device dev;
48 struct drm_device *drm_dev;
49 struct drm_display_mode mode;
50 void __iomem *regs;
51 void __iomem *syscfg;
52 struct clk *clk_pix;
53 struct clk *clk_tmds;
54 struct clk *clk_phy;
55 struct clk *clk_audio;
56 int irq;
57 u32 irq_status;
58 struct hdmi_phy_ops *phy_ops;
59 bool enabled;
60 int hpd_gpio;
61 bool hpd;
62 wait_queue_head_t wait_event;
63 bool event_received;
64 struct reset_control *reset;
65};
66
67u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
68void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset);
69
70/**
71 * hdmi phy config structure
72 *
73 * A pointer to an array of these structures is passed to a TMDS (HDMI) output
74 * via the control interface to provide board and SoC specific
75 * configurations of the HDMI PHY. Each entry in the array specifies a hardware
76 * specific configuration for a given TMDS clock frequency range.
77 *
78 * @min_tmds_freq: Lower bound of TMDS clock frequency this entry applies to
79 * @max_tmds_freq: Upper bound of TMDS clock frequency this entry applies to
80 * @config: SoC specific register configuration
81 */
82struct hdmi_phy_config {
83 u32 min_tmds_freq;
84 u32 max_tmds_freq;
85 u32 config[4];
86};
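/*
 * A hypothetical single-entry table sketching how this structure is
 * filled (values here are placeholders, not real board settings; see
 * hdmiphy_config[] in the tx3g*phy sources for actual tables):
 *
 *	static struct hdmi_phy_config example_config[] = {
 *		{ 0, 165000000, { 0x0, 0x0, 0x0, 0x0 } },
 *	};
 */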
87
88#endif
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
new file mode 100644
index 000000000000..49ae8e44b285
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
@@ -0,0 +1,336 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include "sti_hdmi_tx3g0c55phy.h"
8
9#define HDMI_SRZ_PLL_CFG 0x0504
10#define HDMI_SRZ_TAP_1 0x0508
11#define HDMI_SRZ_TAP_2 0x050C
12#define HDMI_SRZ_TAP_3 0x0510
13#define HDMI_SRZ_CTRL 0x0514
14
15#define HDMI_SRZ_PLL_CFG_POWER_DOWN BIT(0)
16#define HDMI_SRZ_PLL_CFG_VCOR_SHIFT 1
17#define HDMI_SRZ_PLL_CFG_VCOR_425MHZ 0
18#define HDMI_SRZ_PLL_CFG_VCOR_850MHZ 1
19#define HDMI_SRZ_PLL_CFG_VCOR_1700MHZ 2
20#define HDMI_SRZ_PLL_CFG_VCOR_3000MHZ 3
21#define HDMI_SRZ_PLL_CFG_VCOR_MASK 3
22#define HDMI_SRZ_PLL_CFG_VCOR(x) (x << HDMI_SRZ_PLL_CFG_VCOR_SHIFT)
23#define HDMI_SRZ_PLL_CFG_NDIV_SHIFT 8
24#define HDMI_SRZ_PLL_CFG_NDIV_MASK (0x1F << HDMI_SRZ_PLL_CFG_NDIV_SHIFT)
25#define HDMI_SRZ_PLL_CFG_MODE_SHIFT 16
26#define HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ 0x1
27#define HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ 0x4
28#define HDMI_SRZ_PLL_CFG_MODE_27_MHZ 0x5
29#define HDMI_SRZ_PLL_CFG_MODE_33_75_MHZ 0x6
30#define HDMI_SRZ_PLL_CFG_MODE_40_5_MHZ 0x7
31#define HDMI_SRZ_PLL_CFG_MODE_54_MHZ 0x8
32#define HDMI_SRZ_PLL_CFG_MODE_67_5_MHZ 0x9
33#define HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ 0xA
34#define HDMI_SRZ_PLL_CFG_MODE_81_MHZ 0xB
35#define HDMI_SRZ_PLL_CFG_MODE_82_5_MHZ 0xC
36#define HDMI_SRZ_PLL_CFG_MODE_108_MHZ 0xD
37#define HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ 0xE
38#define HDMI_SRZ_PLL_CFG_MODE_165_MHZ 0xF
39#define HDMI_SRZ_PLL_CFG_MODE_MASK 0xF
40#define HDMI_SRZ_PLL_CFG_MODE(x) (x << HDMI_SRZ_PLL_CFG_MODE_SHIFT)
41
42#define HDMI_SRZ_CTRL_POWER_DOWN (1 << 0)
43#define HDMI_SRZ_CTRL_EXTERNAL_DATA_EN (1 << 1)
44
45/* sysconf registers */
46#define HDMI_REJECTION_PLL_CONFIGURATION 0x0858 /* SYSTEM_CONFIG2534 */
47#define HDMI_REJECTION_PLL_STATUS 0x0948 /* SYSTEM_CONFIG2594 */
48
49#define REJECTION_PLL_HDMI_ENABLE_SHIFT 0
50#define REJECTION_PLL_HDMI_ENABLE_MASK (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT)
51#define REJECTION_PLL_HDMI_PDIV_SHIFT 24
52#define REJECTION_PLL_HDMI_PDIV_MASK (0x7 << REJECTION_PLL_HDMI_PDIV_SHIFT)
53#define REJECTION_PLL_HDMI_NDIV_SHIFT 16
54#define REJECTION_PLL_HDMI_NDIV_MASK (0xFF << REJECTION_PLL_HDMI_NDIV_SHIFT)
55#define REJECTION_PLL_HDMI_MDIV_SHIFT 8
56#define REJECTION_PLL_HDMI_MDIV_MASK (0xFF << REJECTION_PLL_HDMI_MDIV_SHIFT)
57
58#define REJECTION_PLL_HDMI_REJ_PLL_LOCK BIT(0)
59
60#define HDMI_TIMEOUT_PLL_LOCK 50 /* milliseconds */
61
62/**
63 * pll mode structure
64 *
65 * A pointer to an array of these structures is passed to a TMDS (HDMI) output
66 * via the control interface to provide board and SoC specific
67 * configurations of the HDMI PHY. Each entry in the array specifies a hardware
68 * specific configuration for a given TMDS clock frequency range. The array
69 * is sized by NB_PLL_MODE rather than being zero-terminated.
70 *
71 * @min: Lower bound of TMDS clock frequency this entry applies to
72 * @max: Upper bound of TMDS clock frequency this entry applies to
73 * @mode: SoC specific register configuration
74 */
75struct pllmode {
76 u32 min;
77 u32 max;
78 u32 mode;
79};
80
81#define NB_PLL_MODE 7
82static struct pllmode pllmodes[NB_PLL_MODE] = {
83 {13500000, 13513500, HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ},
84 {25174800, 25200000, HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ},
85 {27000000, 27027000, HDMI_SRZ_PLL_CFG_MODE_27_MHZ},
86 {54000000, 54054000, HDMI_SRZ_PLL_CFG_MODE_54_MHZ},
87 {72000000, 74250000, HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ},
88 {108000000, 108108000, HDMI_SRZ_PLL_CFG_MODE_108_MHZ},
89 {148351648, 297000000, HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ}
90};
91
92#define NB_HDMI_PHY_CONFIG 5
93static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
94 {0, 40000000, {0x00101010, 0x00101010, 0x00101010, 0x02} },
95 {40000000, 140000000, {0x00111111, 0x00111111, 0x00111111, 0x02} },
96 {140000000, 160000000, {0x00131313, 0x00101010, 0x00101010, 0x02} },
97 {160000000, 250000000, {0x00131313, 0x00111111, 0x00111111, 0x03FE} },
98 {250000000, 300000000, {0x00151515, 0x00101010, 0x00101010, 0x03FE} },
99};
100
101#define PLL_CHANGE_DELAY 1 /* ms */
102
103/**
104 * Disable the rejection PLL
105 *
106 * @hdmi: pointer to the hdmi internal structure
107 *
108 * return true if the pll has been disabled
109 */
110static bool disable_pll_rejection(struct sti_hdmi *hdmi)
111{
112 u32 val;
113
114 DRM_DEBUG_DRIVER("\n");
115
116 val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
117 val &= ~REJECTION_PLL_HDMI_ENABLE_MASK;
118 writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
119
120 msleep(PLL_CHANGE_DELAY);
121 val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
122
123 return !(val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
124}
125
126/**
127 * Enable the old BCH/rejection PLL, which is now reused to provide the
128 * CLKPXPLL clock input to the new PHY PLL that generates the serializer
129 * clock (TMDS*10) and the TMDS clock, which is now fed back into the HDMI
130 * formatter instead of the TMDS clock line from ClockGenB.
131 *
132 * @hdmi: pointer to the hdmi internal structure
133 *
134 * return true if the pll has been correctly set
135 */
136static bool enable_pll_rejection(struct sti_hdmi *hdmi)
137{
138 unsigned int inputclock;
139 u32 mdiv, ndiv, pdiv, val;
140
141 DRM_DEBUG_DRIVER("\n");
142
143 if (!disable_pll_rejection(hdmi))
144 return false;
145
146 inputclock = hdmi->mode.clock * 1000;
147
148 DRM_DEBUG_DRIVER("hdmi rejection pll input clock = %dHz\n", inputclock);
149
151 /* Power up the HDMI rejection PLL
152 * Note: On this SoC (stiH416) we are forced to have the input clock
153 * be equal to the HDMI pixel clock.
154 *
155 * The values here have been suggested by validation however they are
156 * still provisional and subject to change.
157 *
158 * PLLout = (Fin*Mdiv) / ((2 * Ndiv) / 2^Pdiv)
159 */
160 if (inputclock < 50000000) {
161 /*
162 * For slower clocks we need to multiply more to keep the
163 * internal VCO frequency within the physical specification
164 * of the PLL.
165 */
166 pdiv = 4;
167 ndiv = 240;
168 mdiv = 30;
169 } else {
170 pdiv = 2;
171 ndiv = 60;
172 mdiv = 30;
173 }
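	/*
	 * Quick check of the divider formula with the >= 50 MHz settings:
	 * (2 * 60) / 2^2 = 30, so PLLout = (Fin * 30) / 30 = Fin. The
	 * slow-clock settings give (2 * 240) / 2^4 = 30 as well, so the
	 * rejection PLL reproduces its input frequency in both cases,
	 * only the internal VCO rate differs.
	 */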
174
175 val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
176
177 val &= ~(REJECTION_PLL_HDMI_PDIV_MASK |
178 REJECTION_PLL_HDMI_NDIV_MASK |
179 REJECTION_PLL_HDMI_MDIV_MASK |
180 REJECTION_PLL_HDMI_ENABLE_MASK);
181
182 val |= (pdiv << REJECTION_PLL_HDMI_PDIV_SHIFT) |
183 (ndiv << REJECTION_PLL_HDMI_NDIV_SHIFT) |
184 (mdiv << REJECTION_PLL_HDMI_MDIV_SHIFT) |
185 (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT);
186
187 writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
188
189 msleep(PLL_CHANGE_DELAY);
190 val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
191
192 return (val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
193}
194
195/**
196 * Start hdmi phy macro cell tx3g0c55
197 *
198 * @hdmi: pointer to the hdmi internal structure
199 *
200 * Return false if an error occurs
201 */
202static bool sti_hdmi_tx3g0c55phy_start(struct sti_hdmi *hdmi)
203{
204 u32 ckpxpll = hdmi->mode.clock * 1000;
205 u32 val, tmdsck, freqvco, pllctrl = 0;
206 unsigned int i;
207
208 if (!enable_pll_rejection(hdmi))
209 return false;
210
211 DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
212
213 /* Assuming no pixel repetition and 24-bit color */
214 tmdsck = ckpxpll;
215 pllctrl = 2 << HDMI_SRZ_PLL_CFG_NDIV_SHIFT;
216
217 /*
218 * Setup the PLL mode parameter based on the ckpxpll. If we haven't got
219 * a clock frequency supported by one of the specific PLL modes then we
220 * will end up using the generic mode (0) which only supports a 10x
221 * multiplier, hence only 24bit color.
222 */
223 for (i = 0; i < NB_PLL_MODE; i++) {
224 if (ckpxpll >= pllmodes[i].min && ckpxpll <= pllmodes[i].max)
225 pllctrl |= HDMI_SRZ_PLL_CFG_MODE(pllmodes[i].mode);
226 }
227
228 freqvco = tmdsck * 10;
229 if (freqvco <= 425000000UL)
230 pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_425MHZ);
231 else if (freqvco <= 850000000UL)
232 pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_850MHZ);
233 else if (freqvco <= 1700000000UL)
234 pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_1700MHZ);
235 else if (freqvco <= 2970000000UL)
236 pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_3000MHZ);
237 else {
238 DRM_ERROR("PHY serializer clock out of range\n");
239 goto err;
240 }
241
242 /*
243 * Configure and power up the PHY PLL
244 */
245 hdmi->event_received = false;
246 DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
247 hdmi_write(hdmi, pllctrl, HDMI_SRZ_PLL_CFG);
248
249 /* wait PLL interrupt */
250 wait_event_interruptible_timeout(hdmi->wait_event,
251 hdmi->event_received,
252 msecs_to_jiffies
253 (HDMI_TIMEOUT_PLL_LOCK));
254
255 if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
256 DRM_ERROR("hdmi phy pll not locked\n");
257 goto err;
258 }
259
260 DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
261
262 /*
263 * To configure the source termination and pre-emphasis appropriately
264 * for different high speed TMDS clock frequencies a phy configuration
265 * table must be provided, tailored to the SoC and board combination.
266 */
267 for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
268 if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
269 (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
270 val = hdmiphy_config[i].config[0];
271 hdmi_write(hdmi, val, HDMI_SRZ_TAP_1);
272 val = hdmiphy_config[i].config[1];
273 hdmi_write(hdmi, val, HDMI_SRZ_TAP_2);
274 val = hdmiphy_config[i].config[2];
275 hdmi_write(hdmi, val, HDMI_SRZ_TAP_3);
276 val = hdmiphy_config[i].config[3];
277 val |= HDMI_SRZ_CTRL_EXTERNAL_DATA_EN;
278 val &= ~HDMI_SRZ_CTRL_POWER_DOWN;
279 hdmi_write(hdmi, val, HDMI_SRZ_CTRL);
280
281 DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x 0x%x\n",
282 hdmiphy_config[i].config[0],
283 hdmiphy_config[i].config[1],
284 hdmiphy_config[i].config[2],
285 hdmiphy_config[i].config[3]);
286 return true;
287 }
288 }
289
290 /*
291 * Default, power up the serializer with no pre-emphasis or source
292 * termination.
293 */
294 hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_1);
295 hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_2);
296 hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_3);
297 hdmi_write(hdmi, HDMI_SRZ_CTRL_EXTERNAL_DATA_EN, HDMI_SRZ_CTRL);
298
299 return true;
300
301err:
302 disable_pll_rejection(hdmi);
303
304 return false;
305}
306
307/**
308 * Stop hdmi phy macro cell tx3g0c55
309 *
310 * @hdmi: pointer to the hdmi internal structure
311 */
312static void sti_hdmi_tx3g0c55phy_stop(struct sti_hdmi *hdmi)
313{
314 DRM_DEBUG_DRIVER("\n");
315
316 hdmi->event_received = false;
317
318 hdmi_write(hdmi, HDMI_SRZ_CTRL_POWER_DOWN, HDMI_SRZ_CTRL);
319 hdmi_write(hdmi, HDMI_SRZ_PLL_CFG_POWER_DOWN, HDMI_SRZ_PLL_CFG);
320
321 /* wait PLL interrupt */
322 wait_event_interruptible_timeout(hdmi->wait_event,
323 hdmi->event_received,
324 msecs_to_jiffies
325 (HDMI_TIMEOUT_PLL_LOCK));
326
327 if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
328 DRM_ERROR("hdmi phy pll not well disabled\n");
329
330 disable_pll_rejection(hdmi);
331}
332
333struct hdmi_phy_ops tx3g0c55phy_ops = {
334 .start = sti_hdmi_tx3g0c55phy_start,
335 .stop = sti_hdmi_tx3g0c55phy_stop,
336};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
new file mode 100644
index 000000000000..068237b3a303
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
@@ -0,0 +1,14 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_HDMI_TX3G0C55PHY_H_
8#define _STI_HDMI_TX3G0C55PHY_H_
9
10#include "sti_hdmi.h"
11
12extern struct hdmi_phy_ops tx3g0c55phy_ops;
13
14#endif
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
new file mode 100644
index 000000000000..8e0ceb0ced33
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
@@ -0,0 +1,211 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include "sti_hdmi_tx3g4c28phy.h"
8
9#define HDMI_SRZ_CFG 0x504
10#define HDMI_SRZ_PLL_CFG 0x510
11#define HDMI_SRZ_ICNTL 0x518
12#define HDMI_SRZ_CALCODE_EXT 0x520
13
14#define HDMI_SRZ_CFG_EN BIT(0)
15#define HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT BIT(1)
16#define HDMI_SRZ_CFG_EXTERNAL_DATA BIT(16)
17#define HDMI_SRZ_CFG_RBIAS_EXT BIT(17)
18#define HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION BIT(18)
19#define HDMI_SRZ_CFG_EN_BIASRES_DETECTION BIT(19)
20#define HDMI_SRZ_CFG_EN_SRC_TERMINATION BIT(24)
21
22#define HDMI_SRZ_CFG_INTERNAL_MASK (HDMI_SRZ_CFG_EN | \
23 HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT | \
24 HDMI_SRZ_CFG_EXTERNAL_DATA | \
25 HDMI_SRZ_CFG_RBIAS_EXT | \
26 HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION | \
27 HDMI_SRZ_CFG_EN_BIASRES_DETECTION | \
28 HDMI_SRZ_CFG_EN_SRC_TERMINATION)
29
30#define PLL_CFG_EN BIT(0)
31#define PLL_CFG_NDIV_SHIFT (8)
32#define PLL_CFG_IDF_SHIFT (16)
33#define PLL_CFG_ODF_SHIFT (24)
34
35#define ODF_DIV_1 (0)
36#define ODF_DIV_2 (1)
37#define ODF_DIV_4 (2)
38#define ODF_DIV_8 (3)
39
40#define HDMI_TIMEOUT_PLL_LOCK 50 /* milliseconds */
41
42struct plldividers_s {
43 uint32_t min;
44 uint32_t max;
45 uint32_t idf;
46 uint32_t odf;
47};
48
49/*
50 * Functional specification recommended values
51 */
52#define NB_PLL_MODE 5
53static struct plldividers_s plldividers[NB_PLL_MODE] = {
54 {0, 20000000, 1, ODF_DIV_8},
55 {20000000, 42500000, 2, ODF_DIV_8},
56 {42500000, 85000000, 4, ODF_DIV_4},
57 {85000000, 170000000, 8, ODF_DIV_2},
58 {170000000, 340000000, 16, ODF_DIV_1}
59};
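/*
 * Example lookup: a 148.5 MHz pixel clock falls in the 85-170 MHz row,
 * so the scan in sti_hdmi_tx3g4c28phy_start() picks idf = 8 and
 * odf = ODF_DIV_2.
 */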
60
61#define NB_HDMI_PHY_CONFIG 2
62static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
63 {0, 250000000, {0x0, 0x0, 0x0, 0x0} },
64 {250000000, 300000000, {0x1110, 0x0, 0x0, 0x0} },
65};
66
67/**
68 * Start hdmi phy macro cell tx3g4c28
69 *
70 * @hdmi: pointer to the hdmi internal structure
71 *
72 * Return false if an error occurs
73 */
74static bool sti_hdmi_tx3g4c28phy_start(struct sti_hdmi *hdmi)
75{
76 u32 ckpxpll = hdmi->mode.clock * 1000;
77 u32 val, tmdsck, idf, odf, pllctrl = 0;
78 bool foundplldivides = false;
79 int i;
80
81 DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
82
83 for (i = 0; i < NB_PLL_MODE; i++) {
84 if (ckpxpll >= plldividers[i].min &&
85 ckpxpll < plldividers[i].max) {
86 idf = plldividers[i].idf;
87 odf = plldividers[i].odf;
88 foundplldivides = true;
89 break;
90 }
91 }
92
93 if (!foundplldivides) {
94 DRM_ERROR("input TMDS clock speed (%d) not supported\n",
95 ckpxpll);
96 goto err;
97 }
98
99 /* Assuming no pixel repetition and 24-bit color */
100 tmdsck = ckpxpll;
101 pllctrl |= 40 << PLL_CFG_NDIV_SHIFT;
102
103 if (tmdsck > 340000000) {
104 DRM_ERROR("output TMDS clock (%d) out of range\n", tmdsck);
105 goto err;
106 }
107
108 pllctrl |= idf << PLL_CFG_IDF_SHIFT;
109 pllctrl |= odf << PLL_CFG_ODF_SHIFT;
110
111 /*
112 * Configure and power up the PHY PLL
113 */
114 hdmi->event_received = false;
115 DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
116 hdmi_write(hdmi, (pllctrl | PLL_CFG_EN), HDMI_SRZ_PLL_CFG);
117
118 /* wait PLL interrupt */
119 wait_event_interruptible_timeout(hdmi->wait_event,
120 hdmi->event_received,
121 msecs_to_jiffies
122 (HDMI_TIMEOUT_PLL_LOCK));
123
124 if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
125 DRM_ERROR("hdmi phy pll not locked\n");
126 goto err;
127 }
128
129 DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
130
131 val = (HDMI_SRZ_CFG_EN |
132 HDMI_SRZ_CFG_EXTERNAL_DATA |
133 HDMI_SRZ_CFG_EN_BIASRES_DETECTION |
134 HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION);
135
136 if (tmdsck > 165000000)
137 val |= HDMI_SRZ_CFG_EN_SRC_TERMINATION;
138
139 /*
140 * To configure the source termination and pre-emphasis appropriately
141 * for different high speed TMDS clock frequencies a phy configuration
142 * table must be provided, tailored to the SoC and board combination.
143 */
144 for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
145 if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
146 (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
147 val |= (hdmiphy_config[i].config[0]
148 & ~HDMI_SRZ_CFG_INTERNAL_MASK);
149 hdmi_write(hdmi, val, HDMI_SRZ_CFG);
150
151 val = hdmiphy_config[i].config[1];
152 hdmi_write(hdmi, val, HDMI_SRZ_ICNTL);
153
154 val = hdmiphy_config[i].config[2];
155 hdmi_write(hdmi, val, HDMI_SRZ_CALCODE_EXT);
156
157 DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x\n",
158 hdmiphy_config[i].config[0],
159 hdmiphy_config[i].config[1],
160 hdmiphy_config[i].config[2]);
161 return true;
162 }
163 }
164
165 /*
166 * Default, power up the serializer with no pre-emphasis or
167 * output swing correction
168 */
169 hdmi_write(hdmi, val, HDMI_SRZ_CFG);
170 hdmi_write(hdmi, 0x0, HDMI_SRZ_ICNTL);
171 hdmi_write(hdmi, 0x0, HDMI_SRZ_CALCODE_EXT);
172
173 return true;
174
175err:
176 return false;
177}
178
179/**
180 * Stop hdmi phy macro cell tx3g4c28
181 *
182 * @hdmi: pointer to the hdmi internal structure
183 */
184static void sti_hdmi_tx3g4c28phy_stop(struct sti_hdmi *hdmi)
185{
186 int val = 0;
187
188 DRM_DEBUG_DRIVER("\n");
189
190 hdmi->event_received = false;
191
192 val = HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION;
193 val |= HDMI_SRZ_CFG_EN_BIASRES_DETECTION;
194
195 hdmi_write(hdmi, val, HDMI_SRZ_CFG);
196 hdmi_write(hdmi, 0, HDMI_SRZ_PLL_CFG);
197
198 /* wait PLL interrupt */
199 wait_event_interruptible_timeout(hdmi->wait_event,
200 hdmi->event_received,
201 msecs_to_jiffies
202 (HDMI_TIMEOUT_PLL_LOCK));
203
204 if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
205 DRM_ERROR("hdmi phy pll not well disabled\n");
206}
207
208struct hdmi_phy_ops tx3g4c28phy_ops = {
209 .start = sti_hdmi_tx3g4c28phy_start,
210 .stop = sti_hdmi_tx3g4c28phy_stop,
211};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
new file mode 100644
index 000000000000..f99a7ff281ef
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
@@ -0,0 +1,14 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_HDMI_TX3G4C28PHY_H_
8#define _STI_HDMI_TX3G4C28PHY_H_
9
10#include "sti_hdmi.h"
11
12extern struct hdmi_phy_ops tx3g4c28phy_ops;
13
14#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
new file mode 100644
index 000000000000..06a587c4f1bb
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_layer.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <drm/drmP.h>
10#include <drm/drm_gem_cma_helper.h>
11#include <drm/drm_fb_cma_helper.h>
12
13#include "sti_compositor.h"
14#include "sti_gdp.h"
15#include "sti_layer.h"
16#include "sti_vid.h"
17
18const char *sti_layer_to_str(struct sti_layer *layer)
19{
20 switch (layer->desc) {
21 case STI_GDP_0:
22 return "GDP0";
23 case STI_GDP_1:
24 return "GDP1";
25 case STI_GDP_2:
26 return "GDP2";
27 case STI_GDP_3:
28 return "GDP3";
29 case STI_VID_0:
30 return "VID0";
31 case STI_VID_1:
32 return "VID1";
33 case STI_CURSOR:
34 return "CURSOR";
35 default:
36 return "<UNKNOWN LAYER>";
37 }
38}
39
40struct sti_layer *sti_layer_create(struct device *dev, int desc,
41 void __iomem *baseaddr)
42{
44 struct sti_layer *layer = NULL;
45
46 switch (desc & STI_LAYER_TYPE_MASK) {
47 case STI_GDP:
48 layer = sti_gdp_create(dev, desc);
49 break;
50 case STI_VID:
51 layer = sti_vid_create(dev);
52 break;
53 }
54
55 if (!layer) {
56 DRM_ERROR("Failed to create layer\n");
57 return NULL;
58 }
59
60 layer->desc = desc;
61 layer->dev = dev;
62 layer->regs = baseaddr;
63
64 layer->ops->init(layer);
65
66 DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
67
68 return layer;
69}
70
71int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
72 struct drm_display_mode *mode, int mixer_id,
73 int dest_x, int dest_y, int dest_w, int dest_h,
74 int src_x, int src_y, int src_w, int src_h)
75{
76 int ret;
77 unsigned int i;
78 struct drm_gem_cma_object *cma_obj;
79
80 if (!layer || !fb || !mode) {
81 DRM_ERROR("Null fb, layer or mode\n");
82 return 1;
83 }
84
85 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
86 if (!cma_obj) {
87 DRM_ERROR("Can't get CMA GEM object for fb\n");
88 return 1;
89 }
90
91 layer->fb = fb;
92 layer->mode = mode;
93 layer->mixer_id = mixer_id;
94 layer->dst_x = dest_x;
95 layer->dst_y = dest_y;
96 layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
97 layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
98 layer->src_x = src_x;
99 layer->src_y = src_y;
100 layer->src_w = src_w;
101 layer->src_h = src_h;
102 layer->format = fb->pixel_format;
103 layer->paddr = cma_obj->paddr;
104 for (i = 0; i < 4; i++) {
105 layer->pitches[i] = fb->pitches[i];
106 layer->offsets[i] = fb->offsets[i];
107 }
108
109 DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
110 sti_layer_to_str(layer),
111 layer->mixer_id);
112 DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
113 sti_layer_to_str(layer),
114 layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
115 layer->src_w, layer->src_h, layer->src_x,
116 layer->src_y);
117
118 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
119 (char *)&layer->format, (unsigned long)layer->paddr);
120
121 if (!layer->ops->prepare)
122 goto err_no_prepare;
123
124 ret = layer->ops->prepare(layer, !layer->enabled);
125 if (!ret)
126 layer->enabled = true;
127
128 return ret;
129
130err_no_prepare:
131 DRM_ERROR("Cannot prepare\n");
132 return 1;
133}
134
135int sti_layer_commit(struct sti_layer *layer)
136{
137 if (!layer)
138 return 1;
139
140 if (!layer->ops->commit)
141 goto err_no_commit;
142
143 return layer->ops->commit(layer);
144
145err_no_commit:
146 DRM_ERROR("Cannot commit\n");
147 return 1;
148}
149
150int sti_layer_disable(struct sti_layer *layer)
151{
152 int ret;
153
154	if (!layer)
155		return 1;
156	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
157
158 if (!layer->enabled)
159 return 0;
160
161 if (!layer->ops->disable)
162 goto err_no_disable;
163
164 ret = layer->ops->disable(layer);
165 if (!ret)
166 layer->enabled = false;
167 else
168 DRM_ERROR("Disable failed\n");
169
170 return ret;
171
172err_no_disable:
173 DRM_ERROR("Cannot disable\n");
174 return 1;
175}
176
177const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
178{
179 if (!layer)
180 return NULL;
181
182 if (!layer->ops->get_formats)
183 return NULL;
184
185 return layer->ops->get_formats(layer);
186}
187
188unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
189{
190 if (!layer)
191 return 0;
192
193 if (!layer->ops->get_nb_formats)
194 return 0;
195
196 return layer->ops->get_nb_formats(layer);
197}
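
A standalone sketch of the destination clamping performed by sti_layer_prepare(): the requested output size is clipped so the layer never extends past the active display area. The harness below is illustrative only; the 1920x1080 mode and the request values are assumptions, and clamp_val() is re-defined here to mirror the kernel macro.

    #include <stdio.h>

    /* mirrors the kernel's clamp_val() for this illustration */
    #define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    int main(void)
    {
        int crtc_hdisplay = 1920, crtc_vdisplay = 1080;  /* assumed mode */
        int dest_x = 1800, dest_y = 1000;
        int dest_w = 400, dest_h = 200;                  /* overflows the mode */

        /* same computation as sti_layer_prepare() */
        int dst_w = clamp_val(dest_w, 0, crtc_hdisplay - dest_x);
        int dst_h = clamp_val(dest_h, 0, crtc_vdisplay - dest_y);

        printf("dst_w=%d dst_h=%d\n", dst_w, dst_h);     /* prints 120 and 80 */
        return 0;
    }
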
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
new file mode 100644
index 000000000000..198c3774cc12
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_layer.h
@@ -0,0 +1,123 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef _STI_LAYER_H_
10#define _STI_LAYER_H_
11
12#include <drm/drmP.h>
13
14#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
15
16#define STI_LAYER_TYPE_SHIFT 8
17#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
18
19struct sti_layer;
20
21enum sti_layer_type {
22 STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
23 STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
24 STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
25 STI_BCK = 4 << STI_LAYER_TYPE_SHIFT
26};
27
28enum sti_layer_id_of_type {
29 STI_ID_0 = 0,
30 STI_ID_1 = 1,
31 STI_ID_2 = 2,
32 STI_ID_3 = 3
33};
34
35enum sti_layer_desc {
36 STI_GDP_0 = STI_GDP | STI_ID_0,
37 STI_GDP_1 = STI_GDP | STI_ID_1,
38 STI_GDP_2 = STI_GDP | STI_ID_2,
39 STI_GDP_3 = STI_GDP | STI_ID_3,
40 STI_VID_0 = STI_VID | STI_ID_0,
41 STI_VID_1 = STI_VID | STI_ID_1,
42 STI_CURSOR = STI_CUR,
43 STI_BACK = STI_BCK
44};
45
46/**
47 * STI layer functions structure
48 *
49 * @get_formats: get layer supported formats
50 * @get_nb_formats: get number of format supported
51 * @init: initialize the layer
52 * @prepare: prepare layer before rendering
53 * @commit: set layer for rendering
54 * @disable: disable layer
55 */
56struct sti_layer_funcs {
57 const uint32_t* (*get_formats)(struct sti_layer *layer);
58 unsigned int (*get_nb_formats)(struct sti_layer *layer);
59 void (*init)(struct sti_layer *layer);
60 int (*prepare)(struct sti_layer *layer, bool first_prepare);
61 int (*commit)(struct sti_layer *layer);
62 int (*disable)(struct sti_layer *layer);
63};
64
65/**
66 * STI layer structure
67 *
68 * @plane: drm plane it is bound to (if any)
69 * @fb: drm fb it is bound to
70 * @mode: display mode
71 * @desc: layer type & id
72 * @dev: driver device
73 * @regs: layer registers
74 * @ops: layer functions
75 * @zorder: layer z-order
76 * @mixer_id: id of the mixer used to display the layer
77 * @enabled: to know if the layer is active or not
78 * @src_x src_y: coordinates of the input (fb) area
79 * @src_w src_h: size of the input (fb) area
80 * @dst_x dst_y: coordinates of the output (crtc) area
81 * @dst_w dst_h: size of the output (crtc) area
82 * @format: format
83 * @pitches: pitch of 'planes' (e.g. Y, U, V)
84 * @offsets: offset of 'planes'
85 * @paddr: physical address of the input buffer
86 */
87struct sti_layer {
88 struct drm_plane plane;
89 struct drm_framebuffer *fb;
90 struct drm_display_mode *mode;
91 enum sti_layer_desc desc;
92 struct device *dev;
93 void __iomem *regs;
94 const struct sti_layer_funcs *ops;
95 int zorder;
96 int mixer_id;
97 bool enabled;
98 int src_x, src_y;
99 int src_w, src_h;
100 int dst_x, dst_y;
101 int dst_w, dst_h;
102 uint32_t format;
103 unsigned int pitches[4];
104 unsigned int offsets[4];
105 dma_addr_t paddr;
106};
107
108struct sti_layer *sti_layer_create(struct device *dev, int desc,
109 void __iomem *baseaddr);
110int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
111 struct drm_display_mode *mode,
112 int mixer_id,
113 int dest_x, int dest_y,
114 int dest_w, int dest_h,
115 int src_x, int src_y,
116 int src_w, int src_h);
117int sti_layer_commit(struct sti_layer *layer);
118int sti_layer_disable(struct sti_layer *layer);
119const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
120unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
121const char *sti_layer_to_str(struct sti_layer *layer);
122
123#endif
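
For reference, the layer descriptor packs the layer type into the bits at and above STI_LAYER_TYPE_SHIFT and the per-type instance id into the low bits; sti_layer_create() switches on the type part, sti_layer_to_str() on the whole descriptor. A minimal sketch of that decomposition, reusing the constants from sti_layer.h above (the print harness is an assumption for illustration):

    #include <stdio.h>

    #define STI_LAYER_TYPE_SHIFT 8
    #define STI_LAYER_TYPE_MASK (~((1 << STI_LAYER_TYPE_SHIFT) - 1))

    enum sti_layer_type { STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
                          STI_VID = 2 << STI_LAYER_TYPE_SHIFT };

    int main(void)
    {
        int desc = STI_GDP | 2;                 /* STI_GDP_2 */

        int type = desc & STI_LAYER_TYPE_MASK;  /* selects the create path */
        int id = desc & ~STI_LAYER_TYPE_MASK;   /* instance within the type */

        printf("type=0x%x id=%d\n", type, id);  /* prints 0x100 and 2 */
        return 0;
    }
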
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
new file mode 100644
index 000000000000..79f369db9fb6
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -0,0 +1,249 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include "sti_compositor.h"
10#include "sti_mixer.h"
11#include "sti_vtg.h"
12
13/* Identity: G=Y , B=Cb , R=Cr */
14static const u32 mixerColorSpaceMatIdentity[] = {
15 0x10000000, 0x00000000, 0x10000000, 0x00001000,
16 0x00000000, 0x00000000, 0x00000000, 0x00000000
17};
18
19/* regs offset */
20#define GAM_MIXER_CTL 0x00
21#define GAM_MIXER_BKC 0x04
22#define GAM_MIXER_BCO 0x0C
23#define GAM_MIXER_BCS 0x10
24#define GAM_MIXER_AVO 0x28
25#define GAM_MIXER_AVS 0x2C
26#define GAM_MIXER_CRB 0x34
27#define GAM_MIXER_ACT 0x38
28#define GAM_MIXER_MBP 0x3C
29#define GAM_MIXER_MX0 0x80
30
31/* id for depth of CRB reg */
32#define GAM_DEPTH_VID0_ID 1
33#define GAM_DEPTH_VID1_ID 2
34#define GAM_DEPTH_GDP0_ID 3
35#define GAM_DEPTH_GDP1_ID 4
36#define GAM_DEPTH_GDP2_ID 5
37#define GAM_DEPTH_GDP3_ID 6
38#define GAM_DEPTH_MASK_ID 7
39
40/* mask in CTL reg */
41#define GAM_CTL_BACK_MASK BIT(0)
42#define GAM_CTL_VID0_MASK BIT(1)
43#define GAM_CTL_VID1_MASK BIT(2)
44#define GAM_CTL_GDP0_MASK BIT(3)
45#define GAM_CTL_GDP1_MASK BIT(4)
46#define GAM_CTL_GDP2_MASK BIT(5)
47#define GAM_CTL_GDP3_MASK BIT(6)
48
49const char *sti_mixer_to_str(struct sti_mixer *mixer)
50{
51 switch (mixer->id) {
52 case STI_MIXER_MAIN:
53 return "MAIN_MIXER";
54 case STI_MIXER_AUX:
55 return "AUX_MIXER";
56 default:
57 return "<UNKNOWN MIXER>";
58 }
59}
60
61static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
62{
63 return readl(mixer->regs + reg_id);
64}
65
66static inline void sti_mixer_reg_write(struct sti_mixer *mixer,
67 u32 reg_id, u32 val)
68{
69 writel(val, mixer->regs + reg_id);
70}
71
72void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
73{
74 u32 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
75
76 val &= ~GAM_CTL_BACK_MASK;
77 val |= enable ? GAM_CTL_BACK_MASK : 0;
78 sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
79}
80
81static void sti_mixer_set_background_color(struct sti_mixer *mixer,
82 u8 red, u8 green, u8 blue)
83{
84 u32 val = (red << 16) | (green << 8) | blue;
85
86 sti_mixer_reg_write(mixer, GAM_MIXER_BKC, val);
87}
88
89static void sti_mixer_set_background_area(struct sti_mixer *mixer,
90 struct drm_display_mode *mode)
91{
92 u32 ydo, xdo, yds, xds;
93
94 ydo = sti_vtg_get_line_number(*mode, 0);
95 yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
96 xdo = sti_vtg_get_pixel_number(*mode, 0);
97 xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
98
99 sti_mixer_reg_write(mixer, GAM_MIXER_BCO, ydo << 16 | xdo);
100 sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
101}
102
103int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
104{
105 int layer_id = 0, depth = layer->zorder;
106 u32 mask, val;
107
108 if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
109 return 1;
110
111 switch (layer->desc) {
112 case STI_GDP_0:
113 layer_id = GAM_DEPTH_GDP0_ID;
114 break;
115 case STI_GDP_1:
116 layer_id = GAM_DEPTH_GDP1_ID;
117 break;
118 case STI_GDP_2:
119 layer_id = GAM_DEPTH_GDP2_ID;
120 break;
121 case STI_GDP_3:
122 layer_id = GAM_DEPTH_GDP3_ID;
123 break;
124 case STI_VID_0:
125 layer_id = GAM_DEPTH_VID0_ID;
126 break;
127 case STI_VID_1:
128 layer_id = GAM_DEPTH_VID1_ID;
129 break;
130 default:
131 DRM_ERROR("Unknown layer %d\n", layer->desc);
132 return 1;
133 }
134 mask = GAM_DEPTH_MASK_ID << (3 * depth);
135 layer_id = layer_id << (3 * depth);
136
137 DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
138 sti_layer_to_str(layer), depth);
139 dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
140 layer_id, mask);
141
142 val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
143 val &= ~mask;
144 val |= layer_id;
145 sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
146
147 dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
148 sti_mixer_reg_read(mixer, GAM_MIXER_CRB));
149 return 0;
150}
151
152int sti_mixer_active_video_area(struct sti_mixer *mixer,
153 struct drm_display_mode *mode)
154{
155 u32 ydo, xdo, yds, xds;
156
157 ydo = sti_vtg_get_line_number(*mode, 0);
158 yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
159 xdo = sti_vtg_get_pixel_number(*mode, 0);
160 xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
161
162 DRM_DEBUG_DRIVER("%s active video area xdo:%d ydo:%d xds:%d yds:%d\n",
163 sti_mixer_to_str(mixer), xdo, ydo, xds, yds);
164 sti_mixer_reg_write(mixer, GAM_MIXER_AVO, ydo << 16 | xdo);
165 sti_mixer_reg_write(mixer, GAM_MIXER_AVS, yds << 16 | xds);
166
167 sti_mixer_set_background_color(mixer, 0xFF, 0, 0);
168
169 sti_mixer_set_background_area(mixer, mode);
170 sti_mixer_set_background_status(mixer, true);
171 return 0;
172}
173
174static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
175{
176 switch (layer->desc) {
177 case STI_BACK:
178 return GAM_CTL_BACK_MASK;
179 case STI_GDP_0:
180 return GAM_CTL_GDP0_MASK;
181 case STI_GDP_1:
182 return GAM_CTL_GDP1_MASK;
183 case STI_GDP_2:
184 return GAM_CTL_GDP2_MASK;
185 case STI_GDP_3:
186 return GAM_CTL_GDP3_MASK;
187 case STI_VID_0:
188 return GAM_CTL_VID0_MASK;
189 case STI_VID_1:
190 return GAM_CTL_VID1_MASK;
191 default:
192 return 0;
193 }
194}
195
196int sti_mixer_set_layer_status(struct sti_mixer *mixer,
197 struct sti_layer *layer, bool status)
198{
199 u32 mask, val;
200
201 DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
202 sti_mixer_to_str(mixer), sti_layer_to_str(layer));
203
204 mask = sti_mixer_get_layer_mask(layer);
205 if (!mask) {
206 DRM_ERROR("Can not find layer mask\n");
207 return -EINVAL;
208 }
209
210 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
211 val &= ~mask;
212 val |= status ? mask : 0;
213 sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
214
215 return 0;
216}
217
218void sti_mixer_set_matrix(struct sti_mixer *mixer)
219{
220 unsigned int i;
221
222 for (i = 0; i < ARRAY_SIZE(mixerColorSpaceMatIdentity); i++)
223 sti_mixer_reg_write(mixer, GAM_MIXER_MX0 + (i * 4),
224 mixerColorSpaceMatIdentity[i]);
225}
226
227struct sti_mixer *sti_mixer_create(struct device *dev, int id,
228 void __iomem *baseaddr)
229{
230 struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
231 struct device_node *np = dev->of_node;
232
233 dev_dbg(dev, "%s\n", __func__);
234 if (!mixer) {
235 DRM_ERROR("Failed to allocate memory for mixer\n");
236 return NULL;
237 }
238 mixer->regs = baseaddr;
239 mixer->dev = dev;
240 mixer->id = id;
241
242 if (of_device_is_compatible(np, "st,stih416-compositor"))
243 sti_mixer_set_matrix(mixer);
244
245 DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
246 sti_mixer_to_str(mixer), mixer->regs);
247
248 return mixer;
249}
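
sti_mixer_set_layer_depth() above gives each z-order level a 3-bit slot in GAM_MIXER_CRB: the slot at bit 3*depth is cleared with GAM_DEPTH_MASK_ID, then loaded with the layer's depth id. A standalone sketch of that read-modify-write, with a plain variable standing in for the register (the starting value and depth are assumptions):

    #include <stdio.h>
    #include <stdint.h>

    #define GAM_DEPTH_GDP0_ID 3
    #define GAM_DEPTH_MASK_ID 7       /* 3 bits per depth level */

    int main(void)
    {
        uint32_t crb = 0x00000000;    /* stands in for GAM_MIXER_CRB */
        int depth = 2;                /* assumed zorder */

        /* same packing as sti_mixer_set_layer_depth() */
        uint32_t mask = GAM_DEPTH_MASK_ID << (3 * depth);
        uint32_t id = GAM_DEPTH_GDP0_ID << (3 * depth);

        crb = (crb & ~mask) | id;

        printf("CRB=0x%08x\n", crb);  /* 0x000000c0: GDP0 at depth 2 */
        return 0;
    }
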
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
new file mode 100644
index 000000000000..874372102e52
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef _STI_MIXER_H_
10#define _STI_MIXER_H_
11
12#include <drm/drmP.h>
13
14#include "sti_layer.h"
15
16#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
17
18/**
19 * STI Mixer subdevice structure
20 *
21 * @dev: driver device
22 * @regs: mixer registers
23 * @id: id of the mixer
24 * @drm_crtc: crtc object linked to the mixer
25 * @pending_event: set if a flip event is pending on crtc
26 */
27struct sti_mixer {
28 struct device *dev;
29 void __iomem *regs;
30 int id;
31 struct drm_crtc drm_crtc;
32 struct drm_pending_vblank_event *pending_event;
33};
34
35const char *sti_mixer_to_str(struct sti_mixer *mixer);
36
37struct sti_mixer *sti_mixer_create(struct device *dev, int id,
38 void __iomem *baseaddr);
39
40int sti_mixer_set_layer_status(struct sti_mixer *mixer,
41 struct sti_layer *layer, bool status);
42int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
43int sti_mixer_active_video_area(struct sti_mixer *mixer,
44 struct drm_display_mode *mode);
45
46void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
47
48/* depth in Cross-bar control = z order */
49#define GAM_MIXER_NB_DEPTH_LEVEL 7
50
51#define STI_MIXER_MAIN 0
52#define STI_MIXER_AUX 1
53
54#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
new file mode 100644
index 000000000000..b69e26fee76e
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -0,0 +1,648 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Vincent Abriou <vincent.abriou@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <linux/clk.h>
10#include <linux/component.h>
11#include <linux/module.h>
12#include <linux/of_platform.h>
13#include <linux/platform_device.h>
14#include <linux/reset.h>
15
16#include <drm/drmP.h>
17#include <drm/drm_crtc_helper.h>
18
19/* glue registers */
20#define TVO_CSC_MAIN_M0 0x000
21#define TVO_CSC_MAIN_M1 0x004
22#define TVO_CSC_MAIN_M2 0x008
23#define TVO_CSC_MAIN_M3 0x00c
24#define TVO_CSC_MAIN_M4 0x010
25#define TVO_CSC_MAIN_M5 0x014
26#define TVO_CSC_MAIN_M6 0x018
27#define TVO_CSC_MAIN_M7 0x01c
28#define TVO_MAIN_IN_VID_FORMAT 0x030
29#define TVO_CSC_AUX_M0 0x100
30#define TVO_CSC_AUX_M1 0x104
31#define TVO_CSC_AUX_M2 0x108
32#define TVO_CSC_AUX_M3 0x10c
33#define TVO_CSC_AUX_M4 0x110
34#define TVO_CSC_AUX_M5 0x114
35#define TVO_CSC_AUX_M6 0x118
36#define TVO_CSC_AUX_M7 0x11c
37#define TVO_AUX_IN_VID_FORMAT 0x130
38#define TVO_VIP_HDF 0x400
39#define TVO_HD_SYNC_SEL 0x418
40#define TVO_HD_DAC_CFG_OFF 0x420
41#define TVO_VIP_HDMI 0x500
42#define TVO_HDMI_FORCE_COLOR_0 0x504
43#define TVO_HDMI_FORCE_COLOR_1 0x508
44#define TVO_HDMI_CLIP_VALUE_B_CB 0x50c
45#define TVO_HDMI_CLIP_VALUE_Y_G 0x510
46#define TVO_HDMI_CLIP_VALUE_R_CR 0x514
47#define TVO_HDMI_SYNC_SEL 0x518
48#define TVO_HDMI_DFV_OBS 0x540
49
50#define TVO_IN_FMT_SIGNED BIT(0)
51#define TVO_SYNC_EXT BIT(4)
52
53#define TVO_VIP_REORDER_R_SHIFT 24
54#define TVO_VIP_REORDER_G_SHIFT 20
55#define TVO_VIP_REORDER_B_SHIFT 16
56#define TVO_VIP_REORDER_MASK 0x3
57#define TVO_VIP_REORDER_Y_G_SEL 0
58#define TVO_VIP_REORDER_CB_B_SEL 1
59#define TVO_VIP_REORDER_CR_R_SEL 2
60
61#define TVO_VIP_CLIP_SHIFT 8
62#define TVO_VIP_CLIP_MASK 0x7
63#define TVO_VIP_CLIP_DISABLED 0
64#define TVO_VIP_CLIP_EAV_SAV 1
65#define TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y 2
66#define TVO_VIP_CLIP_LIMITED_RANGE_CB_CR 3
67#define TVO_VIP_CLIP_PROG_RANGE 4
68
69#define TVO_VIP_RND_SHIFT 4
70#define TVO_VIP_RND_MASK 0x3
71#define TVO_VIP_RND_8BIT_ROUNDED 0
72#define TVO_VIP_RND_10BIT_ROUNDED 1
73#define TVO_VIP_RND_12BIT_ROUNDED 2
74
75#define TVO_VIP_SEL_INPUT_MASK 0xf
76#define TVO_VIP_SEL_INPUT_MAIN 0x0
77#define TVO_VIP_SEL_INPUT_AUX 0x8
78#define TVO_VIP_SEL_INPUT_FORCE_COLOR 0xf
79#define TVO_VIP_SEL_INPUT_BYPASS_MASK 0x1
80#define TVO_VIP_SEL_INPUT_BYPASSED 1
81
82#define TVO_SYNC_MAIN_VTG_SET_REF 0x00
83#define TVO_SYNC_MAIN_VTG_SET_1 0x01
84#define TVO_SYNC_MAIN_VTG_SET_2 0x02
85#define TVO_SYNC_MAIN_VTG_SET_3 0x03
86#define TVO_SYNC_MAIN_VTG_SET_4 0x04
87#define TVO_SYNC_MAIN_VTG_SET_5 0x05
88#define TVO_SYNC_MAIN_VTG_SET_6 0x06
89#define TVO_SYNC_AUX_VTG_SET_REF 0x10
90#define TVO_SYNC_AUX_VTG_SET_1 0x11
91#define TVO_SYNC_AUX_VTG_SET_2 0x12
92#define TVO_SYNC_AUX_VTG_SET_3 0x13
93#define TVO_SYNC_AUX_VTG_SET_4 0x14
94#define TVO_SYNC_AUX_VTG_SET_5 0x15
95#define TVO_SYNC_AUX_VTG_SET_6 0x16
96
97#define TVO_SYNC_HD_DCS_SHIFT 8
98
99#define ENCODER_MAIN_CRTC_MASK BIT(0)
100
101/* enum listing the supported output data formats */
102enum sti_tvout_video_out_type {
103 STI_TVOUT_VIDEO_OUT_RGB,
104 STI_TVOUT_VIDEO_OUT_YUV,
105};
106
107struct sti_tvout {
108 struct device *dev;
109 struct drm_device *drm_dev;
110 void __iomem *regs;
111 struct reset_control *reset;
112 struct drm_encoder *hdmi;
113 struct drm_encoder *hda;
114};
115
116struct sti_tvout_encoder {
117 struct drm_encoder encoder;
118 struct sti_tvout *tvout;
119};
120
121#define to_sti_tvout_encoder(x) \
122 container_of(x, struct sti_tvout_encoder, encoder)
123
124#define to_sti_tvout(x) to_sti_tvout_encoder(x)->tvout
125
126/* preformatter conversion matrix */
127static const u32 rgb_to_ycbcr_601[8] = {
128 0xF927082E, 0x04C9FEAB, 0x01D30964, 0xFA95FD3D,
129 0x0000082E, 0x00002000, 0x00002000, 0x00000000
130};
131
132/* 709 RGB to YCbCr */
133static const u32 rgb_to_ycbcr_709[8] = {
134 0xF891082F, 0x0367FF40, 0x01280B71, 0xF9B1FE20,
135 0x0000082F, 0x00002000, 0x00002000, 0x00000000
136};
137
138static u32 tvout_read(struct sti_tvout *tvout, int offset)
139{
140 return readl(tvout->regs + offset);
141}
142
143static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
144{
145 writel(val, tvout->regs + offset);
146}
147
148/**
149 * Set the color channel order of a VIP
150 * @tvout: tvout structure
151 * @reg: VIP control register to update
152 * @cr_r: Cr/R channel order
153 * @y_g: Y/G channel order
154 * @cb_b: Cb/B channel order
155 */
156static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
157 u32 cr_r, u32 y_g, u32 cb_b)
158{
159 u32 val = tvout_read(tvout, reg);
160
161 val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT);
162 val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT);
163 val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT);
164 val |= cr_r << TVO_VIP_REORDER_R_SHIFT;
165 val |= y_g << TVO_VIP_REORDER_G_SHIFT;
166 val |= cb_b << TVO_VIP_REORDER_B_SHIFT;
167
168 tvout_write(tvout, val, reg);
169}
170
171/**
172 * Set the clipping mode of a VIP
173 * @tvout: tvout structure
174 * @reg: VIP control register to update
175 * @range: clipping range
176 */
177static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
178{
179 u32 val = tvout_read(tvout, reg);
180
181 val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT);
182 val |= range << TVO_VIP_CLIP_SHIFT;
183 tvout_write(tvout, val, reg);
184}
185
186/**
187 * Set the rounding mode of a VIP
188 * @tvout: tvout structure
189 * @reg: VIP control register to update
190 * @rnd: rounding mode per component
191 */
192static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
193{
194 u32 val = tvout_read(tvout, reg);
195
196 val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT);
197 val |= rnd << TVO_VIP_RND_SHIFT;
198 tvout_write(tvout, val, reg);
199}
200
201/**
202 * Select the VIP input
203 * @tvout: tvout structure; @reg: VIP control register to update
204 * @main_path: main (true) or aux (false) input path
205 * @sel_input_logic_inverted: bypass bit logic inverted (stih407)
206 * @video_out: output data format (RGB bypassed or YUV converted)
207 */
208static void tvout_vip_set_sel_input(struct sti_tvout *tvout, int reg,
209 bool main_path, bool sel_input_logic_inverted,
210 enum sti_tvout_video_out_type video_out)
211{
212 u32 sel_input;
213 u32 val = tvout_read(tvout, reg);
214
215 if (main_path)
216 sel_input = TVO_VIP_SEL_INPUT_MAIN;
217 else
218 sel_input = TVO_VIP_SEL_INPUT_AUX;
219
220 switch (video_out) {
221 case STI_TVOUT_VIDEO_OUT_RGB:
222 sel_input |= TVO_VIP_SEL_INPUT_BYPASSED;
223 break;
224 case STI_TVOUT_VIDEO_OUT_YUV:
225 sel_input &= ~TVO_VIP_SEL_INPUT_BYPASSED;
226 break;
227 }
228
229 /* on stih407 chips the sel_input bypass mode logic is inverted */
230 if (sel_input_logic_inverted)
231 sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
232
233 val &= ~TVO_VIP_SEL_INPUT_MASK;
234 val |= sel_input;
235 tvout_write(tvout, val, reg);
236}
237
238/**
239 * Select signed or unsigned input video format
240 *
241 * @tvout: tvout structure
242 * @in_vid_fmt: input video format (signed or unsigned)
243 */
244static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, u32 in_vid_fmt)
245{
246 u32 val = tvout_read(tvout, TVO_MAIN_IN_VID_FORMAT);
247
248 val &= ~TVO_IN_FMT_SIGNED;
249 val |= in_vid_fmt;
250 tvout_write(tvout, val, TVO_MAIN_IN_VID_FORMAT);
251}
252
253/**
254 * Start VIP block for HDMI output
255 *
256 * @tvout: pointer to the tvout structure
257 * @main_path: true if main path has to be used in the vip configuration
258 * else aux path is used.
259 */
260static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
261{
262 struct device_node *node = tvout->dev->of_node;
263 bool sel_input_logic_inverted = false;
264
265 dev_dbg(tvout->dev, "%s\n", __func__);
266
267 if (main_path) {
268 DRM_DEBUG_DRIVER("main vip for hdmi\n");
269 /* select the input sync for hdmi = VTG set 1 */
270 tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
271 } else {
272 DRM_DEBUG_DRIVER("aux vip for hdmi\n");
273 /* select the input sync for hdmi = VTG set 1 */
274 tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
275 }
276
277 /* set color channel order */
278 tvout_vip_set_color_order(tvout, TVO_VIP_HDMI,
279 TVO_VIP_REORDER_CR_R_SEL,
280 TVO_VIP_REORDER_Y_G_SEL,
281 TVO_VIP_REORDER_CB_B_SEL);
282
283 /* set clipping mode (Limited range RGB/Y) */
284 tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
285 TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
286
287 /* set round mode (rounded to 8-bit per component) */
288 tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
289
290 if (of_device_is_compatible(node, "st,stih407-tvout")) {
291 /* set input video format */
292 tvout_vip_set_in_vid_fmt(tvout, TVO_IN_FMT_SIGNED);
293 sel_input_logic_inverted = true;
294 }
295
296 /* input selection */
297 tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
298 sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
299}
300
301/**
302 * Start HDF VIP and HD DAC
303 *
304 * @tvout: pointer to the tvout structure
305 * @main_path: true if main path has to be used in the vip configuration
306 * else aux path is used.
307 */
308static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
309{
310 struct device_node *node = tvout->dev->of_node;
311 bool sel_input_logic_inverted = false;
312
313 dev_dbg(tvout->dev, "%s\n", __func__);
314
315 if (!main_path) {
316 DRM_ERROR("HD Analog on aux not implemented\n");
317 return;
318 }
319
320 DRM_DEBUG_DRIVER("main vip for HDF\n");
321
322 /* set color channel order */
323 tvout_vip_set_color_order(tvout, TVO_VIP_HDF,
324 TVO_VIP_REORDER_CR_R_SEL,
325 TVO_VIP_REORDER_Y_G_SEL,
326 TVO_VIP_REORDER_CB_B_SEL);
327
328 /* set clipping mode (Limited range Cb/Cr) */
329 tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF,
330 TVO_VIP_CLIP_LIMITED_RANGE_CB_CR);
331
332 /* set round mode (rounded to 10-bit per component) */
333 tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
334
335 if (of_device_is_compatible(node, "st,stih407-tvout")) {
336 /* set input video format */
337 tvout_vip_set_in_vid_fmt(tvout, TVO_IN_FMT_SIGNED);
338 sel_input_logic_inverted = true;
339 }
340
341 /* input selection */
342 tvout_vip_set_sel_input(tvout, TVO_VIP_HDF,
343 main_path,
344 sel_input_logic_inverted,
345 STI_TVOUT_VIDEO_OUT_YUV);
346
347 /* select the input sync for HD analog = VTG set 3
348 * and HD DCS = VTG set 2 */
349 tvout_write(tvout,
350 (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT)
351 | TVO_SYNC_MAIN_VTG_SET_3,
352 TVO_HD_SYNC_SEL);
353
354 /* power up HD DAC */
355 tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
356}
357
358static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
359{
360}
361
362static bool sti_tvout_encoder_mode_fixup(struct drm_encoder *encoder,
363 const struct drm_display_mode *mode,
364 struct drm_display_mode *adjusted_mode)
365{
366 return true;
367}
368
369static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder,
370 struct drm_display_mode *mode,
371 struct drm_display_mode *adjusted_mode)
372{
373}
374
375static void sti_tvout_encoder_prepare(struct drm_encoder *encoder)
376{
377}
378
379static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
380{
381 struct sti_tvout_encoder *sti_encoder = to_sti_tvout_encoder(encoder);
382
383 drm_encoder_cleanup(encoder);
384 kfree(sti_encoder);
385}
386
387static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
388 .destroy = sti_tvout_encoder_destroy,
389};
390
391static void sti_hda_encoder_commit(struct drm_encoder *encoder)
392{
393 struct sti_tvout *tvout = to_sti_tvout(encoder);
394
395 tvout_hda_start(tvout, true);
396}
397
398static void sti_hda_encoder_disable(struct drm_encoder *encoder)
399{
400 struct sti_tvout *tvout = to_sti_tvout(encoder);
401
402 /* reset VIP register */
403 tvout_write(tvout, 0x0, TVO_VIP_HDF);
404
405 /* power down HD DAC */
406 tvout_write(tvout, 1, TVO_HD_DAC_CFG_OFF);
407}
408
409static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = {
410 .dpms = sti_tvout_encoder_dpms,
411 .mode_fixup = sti_tvout_encoder_mode_fixup,
412 .mode_set = sti_tvout_encoder_mode_set,
413 .prepare = sti_tvout_encoder_prepare,
414 .commit = sti_hda_encoder_commit,
415 .disable = sti_hda_encoder_disable,
416};
417
418static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
419 struct sti_tvout *tvout)
420{
421 struct sti_tvout_encoder *encoder;
422 struct drm_encoder *drm_encoder;
423
424 encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
425 if (!encoder)
426 return NULL;
427
428 encoder->tvout = tvout;
429
430 drm_encoder = &encoder->encoder;
431
432 drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
433 drm_encoder->possible_clones = 1 << 0;
434
435 drm_encoder_init(dev, drm_encoder,
436 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC);
437
438 drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs);
439
440 return drm_encoder;
441}
442
443static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
444{
445 struct sti_tvout *tvout = to_sti_tvout(encoder);
446
447 tvout_hdmi_start(tvout, true);
448}
449
450static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
451{
452 struct sti_tvout *tvout = to_sti_tvout(encoder);
453
454 /* reset VIP register */
455 tvout_write(tvout, 0x0, TVO_VIP_HDMI);
456}
457
458static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = {
459 .dpms = sti_tvout_encoder_dpms,
460 .mode_fixup = sti_tvout_encoder_mode_fixup,
461 .mode_set = sti_tvout_encoder_mode_set,
462 .prepare = sti_tvout_encoder_prepare,
463 .commit = sti_hdmi_encoder_commit,
464 .disable = sti_hdmi_encoder_disable,
465};
466
467static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
468 struct sti_tvout *tvout)
469{
470 struct sti_tvout_encoder *encoder;
471 struct drm_encoder *drm_encoder;
472
473 encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
474 if (!encoder)
475 return NULL;
476
477 encoder->tvout = tvout;
478
479 drm_encoder = &encoder->encoder;
480
481 drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
482 drm_encoder->possible_clones = 1 << 1;
483
484 drm_encoder_init(dev, drm_encoder,
485 &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS);
486
487 drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs);
488
489 return drm_encoder;
490}
491
492static void sti_tvout_create_encoders(struct drm_device *dev,
493 struct sti_tvout *tvout)
494{
495 tvout->hdmi = sti_tvout_create_hdmi_encoder(dev, tvout);
496 tvout->hda = sti_tvout_create_hda_encoder(dev, tvout);
497}
498
499static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
500{
501 if (tvout->hdmi)
502 drm_encoder_cleanup(tvout->hdmi);
503 tvout->hdmi = NULL;
504
505 if (tvout->hda)
506 drm_encoder_cleanup(tvout->hda);
507 tvout->hda = NULL;
508}
509
510static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
511{
512 struct sti_tvout *tvout = dev_get_drvdata(dev);
513 struct drm_device *drm_dev = data;
514 unsigned int i;
515 int ret;
516
517 tvout->drm_dev = drm_dev;
518
519 /* set preformatter matrix */
520 for (i = 0; i < 8; i++) {
521 tvout_write(tvout, rgb_to_ycbcr_601[i],
522 TVO_CSC_MAIN_M0 + (i * 4));
523 tvout_write(tvout, rgb_to_ycbcr_601[i],
524 TVO_CSC_AUX_M0 + (i * 4));
525 }
526
527 sti_tvout_create_encoders(drm_dev, tvout);
528
529 ret = component_bind_all(dev, drm_dev);
530 if (ret)
531 sti_tvout_destroy_encoders(tvout);
532
533 return ret;
534}
535
536static void sti_tvout_unbind(struct device *dev, struct device *master,
537 void *data)
538{
539 /* do nothing */
540}
541
542static const struct component_ops sti_tvout_ops = {
543 .bind = sti_tvout_bind,
544 .unbind = sti_tvout_unbind,
545};
546
547static int compare_of(struct device *dev, void *data)
548{
549 return dev->of_node == data;
550}
551
552static int sti_tvout_master_bind(struct device *dev)
553{
554 return 0;
555}
556
557static void sti_tvout_master_unbind(struct device *dev)
558{
559 /* do nothing */
560}
561
562static const struct component_master_ops sti_tvout_master_ops = {
563 .bind = sti_tvout_master_bind,
564 .unbind = sti_tvout_master_unbind,
565};
566
567static int sti_tvout_probe(struct platform_device *pdev)
568{
569 struct device *dev = &pdev->dev;
570 struct device_node *node = dev->of_node;
571 struct sti_tvout *tvout;
572 struct resource *res;
573 struct device_node *child_np;
574 struct component_match *match = NULL;
575
576 DRM_INFO("%s\n", __func__);
577
578 if (!node)
579 return -ENODEV;
580
581 tvout = devm_kzalloc(dev, sizeof(*tvout), GFP_KERNEL);
582 if (!tvout)
583 return -ENOMEM;
584
585 tvout->dev = dev;
586
587 /* get memory resources */
588 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
589 if (!res) {
590 DRM_ERROR("Invalid glue resource\n");
591 return -ENOMEM;
592 }
593 tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
594 if (!tvout->regs)
595 return -ENOMEM;
596
597 /* get reset resources */
598 tvout->reset = devm_reset_control_get(dev, "tvout");
599 /* take tvout out of reset */
600 if (!IS_ERR(tvout->reset))
601 reset_control_deassert(tvout->reset);
602
603 platform_set_drvdata(pdev, tvout);
604
605 of_platform_populate(node, NULL, NULL, dev);
606
607 child_np = of_get_next_available_child(node, NULL);
608
609 while (child_np) {
610 component_match_add(dev, &match, compare_of, child_np);
611 of_node_put(child_np);
612 child_np = of_get_next_available_child(node, child_np);
613 }
614
615 component_master_add_with_match(dev, &sti_tvout_master_ops, match);
616
617 return component_add(dev, &sti_tvout_ops);
618}
619
620static int sti_tvout_remove(struct platform_device *pdev)
621{
622 component_master_del(&pdev->dev, &sti_tvout_master_ops);
623 component_del(&pdev->dev, &sti_tvout_ops);
624 return 0;
625}
626
627static const struct of_device_id tvout_of_match[] = {
628 { .compatible = "st,stih416-tvout", },
629 { .compatible = "st,stih407-tvout", },
630 { /* end node */ }
631};
632MODULE_DEVICE_TABLE(of, tvout_of_match);
633
634struct platform_driver sti_tvout_driver = {
635 .driver = {
636 .name = "sti-tvout",
637 .owner = THIS_MODULE,
638 .of_match_table = tvout_of_match,
639 },
640 .probe = sti_tvout_probe,
641 .remove = sti_tvout_remove,
642};
643
644module_platform_driver(sti_tvout_driver);
645
646MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
647MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
648MODULE_LICENSE("GPL");
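
The VIP helpers in sti_tvout.c each update one field of the same control word: color reorder in bits 16..25, clip mode at TVO_VIP_CLIP_SHIFT, rounding at TVO_VIP_RND_SHIFT and input selection in the low nibble. A sketch of how those fields combine into the value tvout_hdmi_start() leaves in TVO_VIP_HDMI, with a plain variable standing in for the register (the zero starting value is assumed):

    #include <stdio.h>
    #include <stdint.h>

    #define TVO_VIP_REORDER_R_SHIFT 24
    #define TVO_VIP_REORDER_G_SHIFT 20
    #define TVO_VIP_REORDER_B_SHIFT 16
    #define TVO_VIP_REORDER_Y_G_SEL 0
    #define TVO_VIP_REORDER_CB_B_SEL 1
    #define TVO_VIP_REORDER_CR_R_SEL 2
    #define TVO_VIP_CLIP_SHIFT 8
    #define TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y 2
    #define TVO_VIP_RND_SHIFT 4
    #define TVO_VIP_RND_8BIT_ROUNDED 0
    #define TVO_VIP_SEL_INPUT_MAIN 0x0
    #define TVO_VIP_SEL_INPUT_BYPASSED 1

    int main(void)
    {
        uint32_t val = 0;   /* stands in for TVO_VIP_HDMI */

        /* color order, clip mode and rounding as set for HDMI */
        val |= TVO_VIP_REORDER_CR_R_SEL << TVO_VIP_REORDER_R_SHIFT;
        val |= TVO_VIP_REORDER_Y_G_SEL << TVO_VIP_REORDER_G_SHIFT;
        val |= TVO_VIP_REORDER_CB_B_SEL << TVO_VIP_REORDER_B_SHIFT;
        val |= TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y << TVO_VIP_CLIP_SHIFT;
        val |= TVO_VIP_RND_8BIT_ROUNDED << TVO_VIP_RND_SHIFT;

        /* main input, RGB out = conversion bypassed */
        val |= TVO_VIP_SEL_INPUT_MAIN | TVO_VIP_SEL_INPUT_BYPASSED;

        printf("TVO_VIP_HDMI=0x%08x\n", val);   /* prints 0x02010201 */
        return 0;
    }
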
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
new file mode 100644
index 000000000000..10ced6a479f4
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -0,0 +1,138 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include <drm/drmP.h>
8
9#include "sti_layer.h"
10#include "sti_vid.h"
11#include "sti_vtg.h"
12
13/* Registers */
14#define VID_CTL 0x00
15#define VID_ALP 0x04
16#define VID_CLF 0x08
17#define VID_VPO 0x0C
18#define VID_VPS 0x10
19#define VID_KEY1 0x28
20#define VID_KEY2 0x2C
21#define VID_MPR0 0x30
22#define VID_MPR1 0x34
23#define VID_MPR2 0x38
24#define VID_MPR3 0x3C
25#define VID_MST 0x68
26#define VID_BC 0x70
27#define VID_TINT 0x74
28#define VID_CSAT 0x78
29
30/* Registers values */
31#define VID_CTL_IGNORE (BIT(31) | BIT(30))
32#define VID_CTL_PSI_ENABLE (BIT(2) | BIT(1) | BIT(0))
33#define VID_ALP_OPAQUE 0x00000080
34#define VID_BC_DFLT 0x00008000
35#define VID_TINT_DFLT 0x00000000
36#define VID_CSAT_DFLT 0x00000080
37/* YCbCr to RGB BT709:
38 * R = Y+1.5391Cr
39 * G = Y-0.4590Cr-0.1826Cb
40 * B = Y+1.8125Cb */
41#define VID_MPR0_BT709 0x0A800000
42#define VID_MPR1_BT709 0x0AC50000
43#define VID_MPR2_BT709 0x07150545
44#define VID_MPR3_BT709 0x00000AE8
45
46static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
47{
48 u32 val;
49
50 /* Unmask */
51 val = readl(vid->regs + VID_CTL);
52 val &= ~VID_CTL_IGNORE;
53 writel(val, vid->regs + VID_CTL);
54
55 return 0;
56}
57
58static int sti_vid_commit_layer(struct sti_layer *vid)
59{
60 struct drm_display_mode *mode = vid->mode;
61 u32 ydo, xdo, yds, xds;
62
63 ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
64 yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
65 xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
66 xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
67
68 writel((ydo << 16) | xdo, vid->regs + VID_VPO);
69 writel((yds << 16) | xds, vid->regs + VID_VPS);
70
71 return 0;
72}
73
74static int sti_vid_disable_layer(struct sti_layer *vid)
75{
76 u32 val;
77
78 /* Mask */
79 val = readl(vid->regs + VID_CTL);
80 val |= VID_CTL_IGNORE;
81 writel(val, vid->regs + VID_CTL);
82
83 return 0;
84}
85
86static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
87{
88 return NULL;
89}
90
91static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
92{
93 return 0;
94}
95
96static void sti_vid_init(struct sti_layer *vid)
97{
98 /* Enable PSI, Mask layer */
99 writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
100
101 /* Opaque */
102 writel(VID_ALP_OPAQUE, vid->regs + VID_ALP);
103
104 /* Color conversion parameters */
105 writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
106 writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
107 writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
108 writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
109
110 /* Brightness, contrast, tint, saturation */
111 writel(VID_BC_DFLT, vid->regs + VID_BC);
112 writel(VID_TINT_DFLT, vid->regs + VID_TINT);
113 writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
114}
115
116static const struct sti_layer_funcs vid_ops = {
117 .get_formats = sti_vid_get_formats,
118 .get_nb_formats = sti_vid_get_nb_formats,
119 .init = sti_vid_init,
120 .prepare = sti_vid_prepare_layer,
121 .commit = sti_vid_commit_layer,
122 .disable = sti_vid_disable_layer,
123};
124
125struct sti_layer *sti_vid_create(struct device *dev)
126{
127 struct sti_layer *vid;
128
129 vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
130 if (!vid) {
131 DRM_ERROR("Failed to allocate memory for VID\n");
132 return NULL;
133 }
134
135 vid->ops = &vid_ops;
136
137 return vid;
138}
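
sti_vid_commit_layer() packs each viewport corner as (line << 16) | pixel, where the line and pixel numbers come from the sti_vtg helpers and therefore already include the sync and back-porch offsets. A standalone sketch under assumed offsets (42 lines and 192 pixels, matching a 1080p-like mode) and an assumed viewport:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* assumed blanking offsets normally computed by the sti_vtg helpers */
        int line_off = 42, pixel_off = 192;
        /* assumed viewport */
        int dst_x = 100, dst_y = 50, dst_w = 720, dst_h = 480;

        uint32_t ydo = line_off + dst_y;               /* first active line */
        uint32_t yds = line_off + dst_y + dst_h - 1;   /* last active line */
        uint32_t xdo = pixel_off + dst_x;              /* first active pixel */
        uint32_t xds = pixel_off + dst_x + dst_w - 1;  /* last active pixel */

        /* same packing as the VID_VPO/VID_VPS writes */
        printf("VID_VPO=0x%08x VID_VPS=0x%08x\n",
               (ydo << 16) | xdo, (yds << 16) | xds);
        /* prints VID_VPO=0x005c0124 VID_VPS=0x023b03f3 */
        return 0;
    }
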
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
new file mode 100644
index 000000000000..2c0aecd63294
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_VID_H_
8#define _STI_VID_H_
9
10struct sti_layer *sti_vid_create(struct device *dev);
11
12#endif
diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c
new file mode 100644
index 000000000000..82a51d488434
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtac.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include <linux/clk.h>
8#include <linux/io.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/platform_device.h>
12
13#include <drm/drmP.h>
14
15/* registers offset */
16#define VTAC_CONFIG 0x00
17#define VTAC_RX_FIFO_CONFIG 0x04
18#define VTAC_FIFO_CONFIG_VAL 0x04
19
20#define VTAC_SYS_CFG8521 0x824
21#define VTAC_SYS_CFG8522 0x828
22
23/* Number of phyts per pixel */
24#define VTAC_2_5_PPP 0x0005
25#define VTAC_3_PPP 0x0006
26#define VTAC_4_PPP 0x0008
27#define VTAC_5_PPP 0x000A
28#define VTAC_6_PPP 0x000C
29#define VTAC_13_PPP 0x001A
30#define VTAC_14_PPP 0x001C
31#define VTAC_15_PPP 0x001E
32#define VTAC_16_PPP 0x0020
33#define VTAC_17_PPP 0x0022
34#define VTAC_18_PPP 0x0024
35
36/* enable bits */
37#define VTAC_ENABLE 0x3003
38
39#define VTAC_TX_PHY_ENABLE_CLK_PHY BIT(0)
40#define VTAC_TX_PHY_ENABLE_CLK_DLL BIT(1)
41#define VTAC_TX_PHY_PLL_NOT_OSC_MODE BIT(3)
42#define VTAC_TX_PHY_RST_N_DLL_SWITCH BIT(4)
43#define VTAC_TX_PHY_PROG_N3 BIT(9)
44
45
46/**
47 * VTAC mode structure
48 *
49 * @vid_in_width: Video Data Resolution
50 * @phyts_width: width of the phyt buses (phyt low and phyt high).
51 * @phyts_per_pixel: Number of phyts sent per pixel
52 */
53struct sti_vtac_mode {
54 u32 vid_in_width;
55 u32 phyts_width;
56 u32 phyts_per_pixel;
57};
58
59static const struct sti_vtac_mode vtac_mode_main = {0x2, 0x2, VTAC_5_PPP};
60static const struct sti_vtac_mode vtac_mode_aux = {0x1, 0x0, VTAC_17_PPP};
61
62/**
63 * VTAC structure
64 *
65 * @dev: pointer to device structure
66 * @regs: ioremapped registers for RX and TX devices
67 * @phy_regs: phy registers for TX device
68 * @clk: clock
69 * @mode: main or auxiliary configuration mode
70 */
71struct sti_vtac {
72 struct device *dev;
73 void __iomem *regs;
74 void __iomem *phy_regs;
75 struct clk *clk;
76 const struct sti_vtac_mode *mode;
77};
78
79static void sti_vtac_rx_set_config(struct sti_vtac *vtac)
80{
81 u32 config;
82
83 /* Enable VTAC clock */
84 if (clk_prepare_enable(vtac->clk))
85 DRM_ERROR("Failed to prepare/enable vtac_rx clock.\n");
86
87 writel(VTAC_FIFO_CONFIG_VAL, vtac->regs + VTAC_RX_FIFO_CONFIG);
88
89 config = VTAC_ENABLE;
90 config |= vtac->mode->vid_in_width << 4;
91 config |= vtac->mode->phyts_width << 16;
92 config |= vtac->mode->phyts_per_pixel << 23;
93 writel(config, vtac->regs + VTAC_CONFIG);
94}
95
96static void sti_vtac_tx_set_config(struct sti_vtac *vtac)
97{
98 u32 phy_config;
99 u32 config;
100
101 /* Enable VTAC clock */
102 if (clk_prepare_enable(vtac->clk))
103 DRM_ERROR("Failed to prepare/enable vtac_tx clock.\n");
104
105 /* Configure vtac phy */
106 phy_config = 0x00000000;
107 writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8522);
108 phy_config = VTAC_TX_PHY_ENABLE_CLK_PHY;
109 writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
110 phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
111 phy_config |= VTAC_TX_PHY_PROG_N3;
112 writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
113 phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
114 phy_config |= VTAC_TX_PHY_ENABLE_CLK_DLL;
115 writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
116 phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
117 phy_config |= VTAC_TX_PHY_RST_N_DLL_SWITCH;
118 writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
119 phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
120 phy_config |= VTAC_TX_PHY_PLL_NOT_OSC_MODE;
121 writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
122
123 /* Configure vtac tx */
124 config = VTAC_ENABLE;
125 config |= vtac->mode->vid_in_width << 4;
126 config |= vtac->mode->phyts_width << 16;
127 config |= vtac->mode->phyts_per_pixel << 23;
128 writel(config, vtac->regs + VTAC_CONFIG);
129}
130
131static const struct of_device_id vtac_of_match[] = {
132 {
133 .compatible = "st,vtac-main",
134 .data = &vtac_mode_main,
135 }, {
136 .compatible = "st,vtac-aux",
137 .data = &vtac_mode_aux,
138 }, {
139 /* end node */
140 }
141};
142MODULE_DEVICE_TABLE(of, vtac_of_match);
143
144static int sti_vtac_probe(struct platform_device *pdev)
145{
146 struct device *dev = &pdev->dev;
147 struct device_node *np = dev->of_node;
148 const struct of_device_id *id;
149 struct sti_vtac *vtac;
150 struct resource *res;
151
152 vtac = devm_kzalloc(dev, sizeof(*vtac), GFP_KERNEL);
153 if (!vtac)
154 return -ENOMEM;
155
156 vtac->dev = dev;
157
158 id = of_match_node(vtac_of_match, np);
159 if (!id)
160 return -ENOMEM;
161
162 vtac->mode = id->data;
163
164 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
165 if (!res) {
166 DRM_ERROR("Invalid resource\n");
167 return -ENOMEM;
168 }
169 vtac->regs = devm_ioremap_resource(dev, res);
170 if (IS_ERR(vtac->regs))
171 return PTR_ERR(vtac->regs);
172
173
174 vtac->clk = devm_clk_get(dev, "vtac");
175 if (IS_ERR(vtac->clk)) {
176 DRM_ERROR("Cannot get vtac clock\n");
177 return PTR_ERR(vtac->clk);
178 }
179
180 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
181 if (res) {
182 vtac->phy_regs = devm_ioremap_nocache(dev, res->start,
183 resource_size(res));
184 sti_vtac_tx_set_config(vtac);
185 } else {
186
187 sti_vtac_rx_set_config(vtac);
188 }
189
190 platform_set_drvdata(pdev, vtac);
191 DRM_INFO("%s %s\n", __func__, dev_name(vtac->dev));
192
193 return 0;
194}
195
196static int sti_vtac_remove(struct platform_device *pdev)
197{
198 return 0;
199}
200
201struct platform_driver sti_vtac_driver = {
202 .driver = {
203 .name = "sti-vtac",
204 .owner = THIS_MODULE,
205 .of_match_table = vtac_of_match,
206 },
207 .probe = sti_vtac_probe,
208 .remove = sti_vtac_remove,
209};
210
211module_platform_driver(sti_vtac_driver);
212
213MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
214MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
215MODULE_LICENSE("GPL");
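
Both sti_vtac_rx_set_config() and sti_vtac_tx_set_config() build the same VTAC_CONFIG word: the enable bits in the low half, video input width at bit 4, phyt bus width at bit 16 and phyts-per-pixel at bit 23. A sketch for vtac_mode_main; the print harness is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define VTAC_ENABLE 0x3003
    #define VTAC_5_PPP 0x000A

    int main(void)
    {
        /* vtac_mode_main from the driver */
        uint32_t vid_in_width = 0x2, phyts_width = 0x2;
        uint32_t phyts_per_pixel = VTAC_5_PPP;

        /* same packing as sti_vtac_rx/tx_set_config() */
        uint32_t config = VTAC_ENABLE;
        config |= vid_in_width << 4;
        config |= phyts_width << 16;
        config |= phyts_per_pixel << 23;

        printf("VTAC_CONFIG=0x%08x\n", config);   /* prints 0x05023023 */
        return 0;
    }
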
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
new file mode 100644
index 000000000000..740d6e347a62
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -0,0 +1,366 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * Vincent Abriou <vincent.abriou@st.com>
6 * for STMicroelectronics.
7 * License terms: GNU General Public License (GPL), version 2
8 */
9
10#include <linux/module.h>
11#include <linux/notifier.h>
12#include <linux/platform_device.h>
13
14#include <drm/drmP.h>
15
16#include "sti_vtg.h"
17
18#define VTG_TYPE_MASTER 0
19#define VTG_TYPE_SLAVE_BY_EXT0 1
20
21/* registers offset */
22#define VTG_MODE 0x0000
23#define VTG_CLKLN 0x0008
24#define VTG_HLFLN 0x000C
25#define VTG_DRST_AUTOC 0x0010
26#define VTG_VID_TFO 0x0040
27#define VTG_VID_TFS 0x0044
28#define VTG_VID_BFO 0x0048
29#define VTG_VID_BFS 0x004C
30
31#define VTG_HOST_ITS 0x0078
32#define VTG_HOST_ITS_BCLR 0x007C
33#define VTG_HOST_ITM_BCLR 0x0088
34#define VTG_HOST_ITM_BSET 0x008C
35
36#define VTG_H_HD_1 0x00C0
37#define VTG_TOP_V_VD_1 0x00C4
38#define VTG_BOT_V_VD_1 0x00C8
39#define VTG_TOP_V_HD_1 0x00CC
40#define VTG_BOT_V_HD_1 0x00D0
41
42#define VTG_H_HD_2 0x00E0
43#define VTG_TOP_V_VD_2 0x00E4
44#define VTG_BOT_V_VD_2 0x00E8
45#define VTG_TOP_V_HD_2 0x00EC
46#define VTG_BOT_V_HD_2 0x00F0
47
48#define VTG_H_HD_3 0x0100
49#define VTG_TOP_V_VD_3 0x0104
50#define VTG_BOT_V_VD_3 0x0108
51#define VTG_TOP_V_HD_3 0x010C
52#define VTG_BOT_V_HD_3 0x0110
53
54#define VTG_IRQ_BOTTOM BIT(0)
55#define VTG_IRQ_TOP BIT(1)
56#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM)
57
58/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
59#define AWG_DELAY_HD (-9)
60#define AWG_DELAY_ED (-8)
61#define AWG_DELAY_SD (-7)
62
63static LIST_HEAD(vtg_lookup);
64
65/**
66 * STI VTG structure
67 * @dev: pointer to the device
68 * @np: device node of the VTG
69 * @regs: VTG registers
70 * @irq: VTG irq
71 * @irq_status: latched IRQ status
72 * @notifier_list: notifier callbacks for vblank events
73 * @crtc_id: the crtc id for vblank event
74 * @slave: slave vtg
75 * @link: list node in the vtg lookup list
76 */
77struct sti_vtg {
78 struct device *dev;
79 struct device_node *np;
80 void __iomem *regs;
81 int irq;
82 u32 irq_status;
83 struct raw_notifier_head notifier_list;
84 int crtc_id;
85 struct sti_vtg *slave;
86 struct list_head link;
87};
88
89static void vtg_register(struct sti_vtg *vtg)
90{
91 list_add_tail(&vtg->link, &vtg_lookup);
92}
93
94struct sti_vtg *of_vtg_find(struct device_node *np)
95{
96 struct sti_vtg *vtg;
97
98 list_for_each_entry(vtg, &vtg_lookup, link) {
99 if (vtg->np == np)
100 return vtg;
101 }
102 return NULL;
103}
104EXPORT_SYMBOL(of_vtg_find);
105
106static void vtg_reset(struct sti_vtg *vtg)
107{
108 /* reset slave and then master */
109 if (vtg->slave)
110 vtg_reset(vtg->slave);
111
112 writel(1, vtg->regs + VTG_DRST_AUTOC);
113}
114
115static void vtg_set_mode(struct sti_vtg *vtg,
116 int type, const struct drm_display_mode *mode)
117{
118 u32 tmp;
119
120 if (vtg->slave)
121 vtg_set_mode(vtg->slave, VTG_TYPE_SLAVE_BY_EXT0, mode);
122
123 writel(mode->htotal, vtg->regs + VTG_CLKLN);
124 writel(mode->vtotal * 2, vtg->regs + VTG_HLFLN);
125
126 tmp = (mode->vtotal - mode->vsync_start + 1) << 16;
127 tmp |= mode->htotal - mode->hsync_start;
128 writel(tmp, vtg->regs + VTG_VID_TFO);
129 writel(tmp, vtg->regs + VTG_VID_BFO);
130
131 tmp = (mode->vdisplay + mode->vtotal - mode->vsync_start + 1) << 16;
132 tmp |= mode->hdisplay + mode->htotal - mode->hsync_start;
133 writel(tmp, vtg->regs + VTG_VID_TFS);
134 writel(tmp, vtg->regs + VTG_VID_BFS);
135
136 /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */
137 tmp = (mode->hsync_end - mode->hsync_start) << 16;
138 writel(tmp, vtg->regs + VTG_H_HD_1);
139 writel(tmp, vtg->regs + VTG_H_HD_2);
140
141 tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
142 tmp |= 1;
143 writel(tmp, vtg->regs + VTG_TOP_V_VD_1);
144 writel(tmp, vtg->regs + VTG_BOT_V_VD_1);
145 writel(0, vtg->regs + VTG_TOP_V_HD_1);
146 writel(0, vtg->regs + VTG_BOT_V_HD_1);
147
148 /* prepare VTG set 2 for HD DCS */
149 writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
150 writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
151 writel(0, vtg->regs + VTG_TOP_V_HD_2);
152 writel(0, vtg->regs + VTG_BOT_V_HD_2);
153
154 /* prepare VTG set 3 for HD Analog in HD mode */
155 tmp = (mode->hsync_end - mode->hsync_start + AWG_DELAY_HD) << 16;
156 tmp |= mode->htotal + AWG_DELAY_HD;
157 writel(tmp, vtg->regs + VTG_H_HD_3);
158
159 tmp = (mode->vsync_end - mode->vsync_start) << 16;
160 tmp |= mode->vtotal;
161 writel(tmp, vtg->regs + VTG_TOP_V_VD_3);
162 writel(tmp, vtg->regs + VTG_BOT_V_VD_3);
163
164 tmp = (mode->htotal + AWG_DELAY_HD) << 16;
165 tmp |= mode->htotal + AWG_DELAY_HD;
166 writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
167 writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
168
169 /* mode */
170 writel(type, vtg->regs + VTG_MODE);
171}
172
173static void vtg_enable_irq(struct sti_vtg *vtg)
174{
175 /* clear interrupt status and mask */
176 writel(0xFFFF, vtg->regs + VTG_HOST_ITS_BCLR);
177 writel(0xFFFF, vtg->regs + VTG_HOST_ITM_BCLR);
178 writel(VTG_IRQ_MASK, vtg->regs + VTG_HOST_ITM_BSET);
179}
180
181void sti_vtg_set_config(struct sti_vtg *vtg,
182 const struct drm_display_mode *mode)
183{
184 /* write configuration */
185 vtg_set_mode(vtg, VTG_TYPE_MASTER, mode);
186
187 vtg_reset(vtg);
188
189 /* enable irq for the vtg vblank synchro */
190 if (vtg->slave)
191 vtg_enable_irq(vtg->slave);
192 else
193 vtg_enable_irq(vtg);
194}
195EXPORT_SYMBOL(sti_vtg_set_config);
196
197/**
198 * sti_vtg_get_line_number
199 *
200 * @mode: display mode to be used
201 * @y: line
202 *
203 * Return the line number according to the display mode taking
204 * into account the Sync and Back Porch information.
205 * Video frame line numbers start at 1, y starts at 0.
206 * In interlaced modes the start line is the field line number of the odd
207 * field, but y is still defined as a progressive frame.
208 */
209u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y)
210{
211 u32 start_line = mode.vtotal - mode.vsync_start + 1;
212
213 if (mode.flags & DRM_MODE_FLAG_INTERLACE)
214 start_line *= 2;
215
216 return start_line + y;
217}
218EXPORT_SYMBOL(sti_vtg_get_line_number);
219
220/**
221 * sti_vtg_get_pixel_number
222 *
223 * @mode: display mode to be used
224 * @x: row
225 *
226 * Return the pixel number according to the display mode taking
227 * into account the Sync and Back Porch information.
228 * Pixels are counted from 0.
229 */
230u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x)
231{
232 return mode.htotal - mode.hsync_start + x;
233}
234EXPORT_SYMBOL(sti_vtg_get_pixel_number);
235
236int sti_vtg_register_client(struct sti_vtg *vtg,
237 struct notifier_block *nb, int crtc_id)
238{
239 if (vtg->slave)
240 return sti_vtg_register_client(vtg->slave, nb, crtc_id);
241
242 vtg->crtc_id = crtc_id;
243 return raw_notifier_chain_register(&vtg->notifier_list, nb);
244}
245EXPORT_SYMBOL(sti_vtg_register_client);
246
247int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb)
248{
249 if (vtg->slave)
250 return sti_vtg_unregister_client(vtg->slave, nb);
251
252 return raw_notifier_chain_unregister(&vtg->notifier_list, nb);
253}
254EXPORT_SYMBOL(sti_vtg_unregister_client);
255
256static irqreturn_t vtg_irq_thread(int irq, void *arg)
257{
258 struct sti_vtg *vtg = arg;
259 u32 event;
260
261 event = (vtg->irq_status & VTG_IRQ_TOP) ?
262 VTG_TOP_FIELD_EVENT : VTG_BOTTOM_FIELD_EVENT;
263
264 raw_notifier_call_chain(&vtg->notifier_list, event, &vtg->crtc_id);
265
266 return IRQ_HANDLED;
267}
268
269static irqreturn_t vtg_irq(int irq, void *arg)
270{
271 struct sti_vtg *vtg = arg;
272
273 vtg->irq_status = readl(vtg->regs + VTG_HOST_ITS);
274
275 writel(vtg->irq_status, vtg->regs + VTG_HOST_ITS_BCLR);
276
277 /* force sync bus write */
278 readl(vtg->regs + VTG_HOST_ITS);
279
280 return IRQ_WAKE_THREAD;
281}
282
283static int vtg_probe(struct platform_device *pdev)
284{
285 struct device *dev = &pdev->dev;
286 struct device_node *np;
287 struct sti_vtg *vtg;
288 struct resource *res;
289 char irq_name[32];
290 int ret;
291
292 vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
293 if (!vtg)
294 return -ENOMEM;
295
296 vtg->dev = dev;
297 vtg->np = pdev->dev.of_node;
298
299 /* get memory resources */
300 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
301 if (!res) {
302 DRM_ERROR("Get memory resource failed\n");
303 return -ENOMEM;
304 }
305 vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
306 if (!vtg->regs)
307 return -ENOMEM;
308
307 np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
308 if (np) {
309 vtg->slave = of_vtg_find(np);
310
311 if (!vtg->slave)
312 return -EPROBE_DEFER;
313 } else {
314 vtg->irq = platform_get_irq(pdev, 0);
315 if (vtg->irq < 0) {
316 DRM_ERROR("Failed to get VTG interrupt\n");
317 return vtg->irq;
318 }
319
320 snprintf(irq_name, sizeof(irq_name), "vsync-%s",
321 dev_name(vtg->dev));
322
323 RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list);
324
325 ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
326 vtg_irq_thread, IRQF_ONESHOT, irq_name, vtg);
327 if (ret < 0) {
328 DRM_ERROR("Failed to register VTG interrupt\n");
329 return ret;
330 }
331 }
332
333 vtg_register(vtg);
334 platform_set_drvdata(pdev, vtg);
335
336 DRM_INFO("%s %s\n", __func__, dev_name(vtg->dev));
337
338 return 0;
339}
340
341static int vtg_remove(struct platform_device *pdev)
342{
343 return 0;
344}
345
346static const struct of_device_id vtg_of_match[] = {
347 { .compatible = "st,vtg", },
348 { /* sentinel */ }
349};
350MODULE_DEVICE_TABLE(of, vtg_of_match);
351
352struct platform_driver sti_vtg_driver = {
353 .driver = {
354 .name = "sti-vtg",
355 .owner = THIS_MODULE,
356 .of_match_table = vtg_of_match,
357 },
358 .probe = vtg_probe,
359 .remove = vtg_remove,
360};
361
362module_platform_driver(sti_vtg_driver);
363
364MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
365MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
366MODULE_LICENSE("GPL");
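
A standalone sketch of the coordinate translation done by sti_vtg_get_line_number() and sti_vtg_get_pixel_number(); the 1080p timings used as input (htotal 2200, hsync_start 2008, vtotal 1125, vsync_start 1084) are assumed for illustration:

    #include <stdio.h>
    #include <stdint.h>

    struct mode { int htotal, hsync_start, vtotal, vsync_start; int interlaced; };

    /* same arithmetic as sti_vtg_get_line_number() */
    static uint32_t line_number(struct mode m, int y)
    {
        uint32_t start_line = m.vtotal - m.vsync_start + 1;

        if (m.interlaced)
            start_line *= 2;
        return start_line + y;
    }

    /* same arithmetic as sti_vtg_get_pixel_number() */
    static uint32_t pixel_number(struct mode m, int x)
    {
        return m.htotal - m.hsync_start + x;
    }

    int main(void)
    {
        struct mode m = { 2200, 2008, 1125, 1084, 0 };  /* assumed 1080p timings */

        printf("line(0)=%u pixel(0)=%u\n",
               line_number(m, 0), pixel_number(m, 0));
        /* prints line(0)=42 pixel(0)=192 */
        return 0;
    }
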
diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h
new file mode 100644
index 000000000000..e84d23f1f57f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtg.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_VTG_H_
8#define _STI_VTG_H_
9
10#define VTG_TOP_FIELD_EVENT 1
11#define VTG_BOTTOM_FIELD_EVENT 2
12
13struct sti_vtg;
14struct drm_display_mode;
15struct notifier_block;
16
17struct sti_vtg *of_vtg_find(struct device_node *np);
18void sti_vtg_set_config(struct sti_vtg *vtg,
19 const struct drm_display_mode *mode);
20int sti_vtg_register_client(struct sti_vtg *vtg,
21 struct notifier_block *nb, int crtc_id);
22int sti_vtg_unregister_client(struct sti_vtg *vtg,
23 struct notifier_block *nb);
24
25u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y);
26u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x);
27
28#endif
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index ef40381f3909..6553fd238685 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -18,6 +18,8 @@
 struct tegra_dc_soc_info {
 	bool supports_interlacing;
 	bool supports_cursor;
+	bool supports_block_linear;
+	unsigned int pitch_align;
 };
 
 struct tegra_plane {
@@ -212,15 +214,44 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
 	tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
 	tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
 
-	if (window->tiled) {
-		value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
-			DC_WIN_BUFFER_ADDR_MODE_TILE;
+	if (dc->soc->supports_block_linear) {
+		unsigned long height = window->tiling.value;
+
+		switch (window->tiling.mode) {
+		case TEGRA_BO_TILING_MODE_PITCH:
+			value = DC_WINBUF_SURFACE_KIND_PITCH;
+			break;
+
+		case TEGRA_BO_TILING_MODE_TILED:
+			value = DC_WINBUF_SURFACE_KIND_TILED;
+			break;
+
+		case TEGRA_BO_TILING_MODE_BLOCK:
+			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+				DC_WINBUF_SURFACE_KIND_BLOCK;
+			break;
+		}
+
+		tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
 	} else {
-		value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
-			DC_WIN_BUFFER_ADDR_MODE_LINEAR;
-	}
+		switch (window->tiling.mode) {
+		case TEGRA_BO_TILING_MODE_PITCH:
+			value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+				DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+			break;
 
-	tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+		case TEGRA_BO_TILING_MODE_TILED:
+			value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+				DC_WIN_BUFFER_ADDR_MODE_TILE;
+			break;
+
+		case TEGRA_BO_TILING_MODE_BLOCK:
+			DRM_ERROR("hardware doesn't support block linear mode\n");
+			return -EINVAL;
+		}
+
+		tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+	}
 
 	value = WIN_ENABLE;
 
@@ -288,6 +319,7 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 	struct tegra_dc_window window;
 	unsigned int i;
+	int err;
 
 	memset(&window, 0, sizeof(window));
 	window.src.x = src_x >> 16;
@@ -301,7 +333,10 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	window.format = tegra_dc_format(fb->pixel_format, &window.swap);
 	window.bits_per_pixel = fb->bits_per_pixel;
 	window.bottom_up = tegra_fb_is_bottom_up(fb);
-	window.tiled = tegra_fb_is_tiled(fb);
+
+	err = tegra_fb_get_tiling(fb, &window.tiling);
+	if (err < 0)
+		return err;
 
 	for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
 		struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -402,8 +437,14 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 {
 	struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
 	unsigned int h_offset = 0, v_offset = 0;
+	struct tegra_bo_tiling tiling;
 	unsigned int format, swap;
 	unsigned long value;
+	int err;
+
+	err = tegra_fb_get_tiling(fb, &tiling);
+	if (err < 0)
+		return err;
 
 	tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
 
@@ -417,15 +458,44 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 	tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
 	tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP);
 
-	if (tegra_fb_is_tiled(fb)) {
-		value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
-			DC_WIN_BUFFER_ADDR_MODE_TILE;
+	if (dc->soc->supports_block_linear) {
+		unsigned long height = tiling.value;
+
+		switch (tiling.mode) {
+		case TEGRA_BO_TILING_MODE_PITCH:
+			value = DC_WINBUF_SURFACE_KIND_PITCH;
+			break;
+
+		case TEGRA_BO_TILING_MODE_TILED:
+			value = DC_WINBUF_SURFACE_KIND_TILED;
+			break;
+
+		case TEGRA_BO_TILING_MODE_BLOCK:
+			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+				DC_WINBUF_SURFACE_KIND_BLOCK;
+			break;
+		}
+
+		tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
 	} else {
-		value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
-			DC_WIN_BUFFER_ADDR_MODE_LINEAR;
-	}
+		switch (tiling.mode) {
+		case TEGRA_BO_TILING_MODE_PITCH:
+			value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+				DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+			break;
 
-	tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+		case TEGRA_BO_TILING_MODE_TILED:
+			value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+				DC_WIN_BUFFER_ADDR_MODE_TILE;
+			break;
+
+		case TEGRA_BO_TILING_MODE_BLOCK:
+			DRM_ERROR("hardware doesn't support block linear mode\n");
+			return -EINVAL;
+		}
+
+		tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+	}
 
 	/* make sure bottom-up buffers are properly displayed */
431 if (tegra_fb_is_bottom_up(fb)) { 501 if (tegra_fb_is_bottom_up(fb)) {
@@ -1214,12 +1284,20 @@ static int tegra_dc_init(struct host1x_client *client)
1214{ 1284{
1215 struct drm_device *drm = dev_get_drvdata(client->parent); 1285 struct drm_device *drm = dev_get_drvdata(client->parent);
1216 struct tegra_dc *dc = host1x_client_to_dc(client); 1286 struct tegra_dc *dc = host1x_client_to_dc(client);
1287 struct tegra_drm *tegra = drm->dev_private;
1217 int err; 1288 int err;
1218 1289
1219 drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs); 1290 drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
1220 drm_mode_crtc_set_gamma_size(&dc->base, 256); 1291 drm_mode_crtc_set_gamma_size(&dc->base, 256);
1221 drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs); 1292 drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
1222 1293
1294 /*
1295 * Keep track of the minimum pitch alignment across all display
1296 * controllers.
1297 */
1298 if (dc->soc->pitch_align > tegra->pitch_align)
1299 tegra->pitch_align = dc->soc->pitch_align;
1300
1223 err = tegra_dc_rgb_init(drm, dc); 1301 err = tegra_dc_rgb_init(drm, dc);
1224 if (err < 0 && err != -ENODEV) { 1302 if (err < 0 && err != -ENODEV) {
1225 dev_err(dc->dev, "failed to initialize RGB output: %d\n", err); 1303 dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
@@ -1277,16 +1355,29 @@ static const struct host1x_client_ops dc_client_ops = {
1277static const struct tegra_dc_soc_info tegra20_dc_soc_info = { 1355static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
1278 .supports_interlacing = false, 1356 .supports_interlacing = false,
1279 .supports_cursor = false, 1357 .supports_cursor = false,
1358 .supports_block_linear = false,
1359 .pitch_align = 8,
1280}; 1360};
1281 1361
1282static const struct tegra_dc_soc_info tegra30_dc_soc_info = { 1362static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
1283 .supports_interlacing = false, 1363 .supports_interlacing = false,
1284 .supports_cursor = false, 1364 .supports_cursor = false,
1365 .supports_block_linear = false,
1366 .pitch_align = 8,
1367};
1368
1369static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
1370 .supports_interlacing = false,
1371 .supports_cursor = false,
1372 .supports_block_linear = false,
1373 .pitch_align = 64,
1285}; 1374};
1286 1375
1287static const struct tegra_dc_soc_info tegra124_dc_soc_info = { 1376static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
1288 .supports_interlacing = true, 1377 .supports_interlacing = true,
1289 .supports_cursor = true, 1378 .supports_cursor = true,
1379 .supports_block_linear = true,
1380 .pitch_align = 64,
1290}; 1381};
1291 1382
1292static const struct of_device_id tegra_dc_of_match[] = { 1383static const struct of_device_id tegra_dc_of_match[] = {
@@ -1303,6 +1394,7 @@ static const struct of_device_id tegra_dc_of_match[] = {
1303 /* sentinel */ 1394 /* sentinel */
1304 } 1395 }
1305}; 1396};
1397MODULE_DEVICE_TABLE(of, tegra_dc_of_match);
1306 1398
1307static int tegra_dc_parse_dt(struct tegra_dc *dc) 1399static int tegra_dc_parse_dt(struct tegra_dc *dc)
1308{ 1400{
@@ -1430,6 +1522,7 @@ static int tegra_dc_remove(struct platform_device *pdev)
1430 return err; 1522 return err;
1431 } 1523 }
1432 1524
1525 reset_control_assert(dc->rst);
1433 clk_disable_unprepare(dc->clk); 1526 clk_disable_unprepare(dc->clk);
1434 1527
1435 return 0; 1528 return 0;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 78c5feff95d2..705c93b00794 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -428,6 +428,11 @@
428#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809 428#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
429 429
430#define DC_WINBUF_UFLOW_STATUS 0x80a 430#define DC_WINBUF_UFLOW_STATUS 0x80a
431#define DC_WINBUF_SURFACE_KIND 0x80b
432#define DC_WINBUF_SURFACE_KIND_PITCH (0 << 0)
433#define DC_WINBUF_SURFACE_KIND_TILED (1 << 0)
434#define DC_WINBUF_SURFACE_KIND_BLOCK (2 << 0)
435#define DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(x) (((x) & 0x7) << 4)
431 436
432#define DC_WINBUF_AD_UFLOW_STATUS 0xbca 437#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
433#define DC_WINBUF_BD_UFLOW_STATUS 0xdca 438#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
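
The block-height field above is a 3-bit value (mask 0x7), which the new set-tiling ioctl below caps at 5; the driver treats it as a log2 block height. Composing the register value then reduces to one switch, mirroring the two copies in dc.c; this helper is a sketch with a made-up name, not code from the patch:

/* Sketch: composing DC_WINBUF_SURFACE_KIND from a tegra_bo_tiling,
 * as done twice in dc.c above. */
static u32 surface_kind_for(const struct tegra_bo_tiling *tiling)
{
	switch (tiling->mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		return DC_WINBUF_SURFACE_KIND_PITCH;
	case TEGRA_BO_TILING_MODE_TILED:
		return DC_WINBUF_SURFACE_KIND_TILED;
	case TEGRA_BO_TILING_MODE_BLOCK:
		/* tiling->value is the log2 block height, 0..5 */
		return DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(tiling->value) |
		       DC_WINBUF_SURFACE_KIND_BLOCK;
	}
	return DC_WINBUF_SURFACE_KIND_PITCH;
}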
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 3f132e356e9c..708f783ead47 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -382,6 +382,7 @@ static const struct of_device_id tegra_dpaux_of_match[] = {
382 { .compatible = "nvidia,tegra124-dpaux", }, 382 { .compatible = "nvidia,tegra124-dpaux", },
383 { }, 383 { },
384}; 384};
385MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
385 386
386struct platform_driver tegra_dpaux_driver = { 387struct platform_driver tegra_dpaux_driver = {
387 .driver = { 388 .driver = {
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 3396f9f6a9f7..59736bb810cd 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -40,6 +40,12 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
40 40
41 drm_mode_config_init(drm); 41 drm_mode_config_init(drm);
42 42
43 err = tegra_drm_fb_prepare(drm);
44 if (err < 0)
45 return err;
46
47 drm_kms_helper_poll_init(drm);
48
43 err = host1x_device_init(device); 49 err = host1x_device_init(device);
44 if (err < 0) 50 if (err < 0)
45 return err; 51 return err;
@@ -59,8 +65,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
59 if (err < 0) 65 if (err < 0)
60 return err; 66 return err;
61 67
62 drm_kms_helper_poll_init(drm);
63
64 return 0; 68 return 0;
65} 69}
66 70
@@ -128,6 +132,45 @@ host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
128 return &bo->base; 132 return &bo->base;
129} 133}
130 134
135static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
136 struct drm_tegra_reloc __user *src,
137 struct drm_device *drm,
138 struct drm_file *file)
139{
140 u32 cmdbuf, target;
141 int err;
142
143 err = get_user(cmdbuf, &src->cmdbuf.handle);
144 if (err < 0)
145 return err;
146
147 err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
148 if (err < 0)
149 return err;
150
151 err = get_user(target, &src->target.handle);
152 if (err < 0)
153 return err;
154
 155 err = get_user(dest->target.offset, &src->target.offset);
156 if (err < 0)
157 return err;
158
159 err = get_user(dest->shift, &src->shift);
160 if (err < 0)
161 return err;
162
163 dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf);
164 if (!dest->cmdbuf.bo)
165 return -ENOENT;
166
167 dest->target.bo = host1x_bo_lookup(drm, file, target);
168 if (!dest->target.bo)
169 return -ENOENT;
170
171 return 0;
172}
173
131int tegra_drm_submit(struct tegra_drm_context *context, 174int tegra_drm_submit(struct tegra_drm_context *context,
132 struct drm_tegra_submit *args, struct drm_device *drm, 175 struct drm_tegra_submit *args, struct drm_device *drm,
133 struct drm_file *file) 176 struct drm_file *file)
@@ -180,26 +223,13 @@ int tegra_drm_submit(struct tegra_drm_context *context,
180 cmdbufs++; 223 cmdbufs++;
181 } 224 }
182 225
183 if (copy_from_user(job->relocarray, relocs, 226 /* copy and resolve relocations from submit */
184 sizeof(*relocs) * num_relocs)) {
185 err = -EFAULT;
186 goto fail;
187 }
188
189 while (num_relocs--) { 227 while (num_relocs--) {
190 struct host1x_reloc *reloc = &job->relocarray[num_relocs]; 228 err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
191 struct host1x_bo *cmdbuf, *target; 229 &relocs[num_relocs], drm,
192 230 file);
193 cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf); 231 if (err < 0)
194 target = host1x_bo_lookup(drm, file, (u32)reloc->target);
195
196 reloc->cmdbuf = cmdbuf;
197 reloc->target = target;
198
199 if (!reloc->target || !reloc->cmdbuf) {
200 err = -ENOENT;
201 goto fail; 232 goto fail;
202 }
203 } 233 }
204 234
205 if (copy_from_user(job->waitchk, waitchks, 235 if (copy_from_user(job->waitchk, waitchks,
@@ -451,11 +481,151 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
451 481
452 return 0; 482 return 0;
453} 483}
484
485static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
486 struct drm_file *file)
487{
488 struct drm_tegra_gem_set_tiling *args = data;
489 enum tegra_bo_tiling_mode mode;
490 struct drm_gem_object *gem;
491 unsigned long value = 0;
492 struct tegra_bo *bo;
493
494 switch (args->mode) {
495 case DRM_TEGRA_GEM_TILING_MODE_PITCH:
496 mode = TEGRA_BO_TILING_MODE_PITCH;
497
498 if (args->value != 0)
499 return -EINVAL;
500
501 break;
502
503 case DRM_TEGRA_GEM_TILING_MODE_TILED:
504 mode = TEGRA_BO_TILING_MODE_TILED;
505
506 if (args->value != 0)
507 return -EINVAL;
508
509 break;
510
511 case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
512 mode = TEGRA_BO_TILING_MODE_BLOCK;
513
514 if (args->value > 5)
515 return -EINVAL;
516
517 value = args->value;
518 break;
519
520 default:
521 return -EINVAL;
522 }
523
524 gem = drm_gem_object_lookup(drm, file, args->handle);
525 if (!gem)
526 return -ENOENT;
527
528 bo = to_tegra_bo(gem);
529
530 bo->tiling.mode = mode;
531 bo->tiling.value = value;
532
533 drm_gem_object_unreference(gem);
534
535 return 0;
536}
537
538static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
539 struct drm_file *file)
540{
541 struct drm_tegra_gem_get_tiling *args = data;
542 struct drm_gem_object *gem;
543 struct tegra_bo *bo;
544 int err = 0;
545
546 gem = drm_gem_object_lookup(drm, file, args->handle);
547 if (!gem)
548 return -ENOENT;
549
550 bo = to_tegra_bo(gem);
551
552 switch (bo->tiling.mode) {
553 case TEGRA_BO_TILING_MODE_PITCH:
554 args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
555 args->value = 0;
556 break;
557
558 case TEGRA_BO_TILING_MODE_TILED:
559 args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
560 args->value = 0;
561 break;
562
563 case TEGRA_BO_TILING_MODE_BLOCK:
564 args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
565 args->value = bo->tiling.value;
566 break;
567
568 default:
569 err = -EINVAL;
570 break;
571 }
572
573 drm_gem_object_unreference(gem);
574
575 return err;
576}
577
578static int tegra_gem_set_flags(struct drm_device *drm, void *data,
579 struct drm_file *file)
580{
581 struct drm_tegra_gem_set_flags *args = data;
582 struct drm_gem_object *gem;
583 struct tegra_bo *bo;
584
585 if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
586 return -EINVAL;
587
588 gem = drm_gem_object_lookup(drm, file, args->handle);
589 if (!gem)
590 return -ENOENT;
591
592 bo = to_tegra_bo(gem);
593 bo->flags = 0;
594
595 if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
596 bo->flags |= TEGRA_BO_BOTTOM_UP;
597
598 drm_gem_object_unreference(gem);
599
600 return 0;
601}
602
603static int tegra_gem_get_flags(struct drm_device *drm, void *data,
604 struct drm_file *file)
605{
606 struct drm_tegra_gem_get_flags *args = data;
607 struct drm_gem_object *gem;
608 struct tegra_bo *bo;
609
610 gem = drm_gem_object_lookup(drm, file, args->handle);
611 if (!gem)
612 return -ENOENT;
613
614 bo = to_tegra_bo(gem);
615 args->flags = 0;
616
617 if (bo->flags & TEGRA_BO_BOTTOM_UP)
618 args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
619
620 drm_gem_object_unreference(gem);
621
622 return 0;
623}
454#endif 624#endif
455 625
456static const struct drm_ioctl_desc tegra_drm_ioctls[] = { 626static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
457#ifdef CONFIG_DRM_TEGRA_STAGING 627#ifdef CONFIG_DRM_TEGRA_STAGING
458 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH), 628 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED),
459 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED), 629 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
460 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED), 630 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
461 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED), 631 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
@@ -465,6 +635,10 @@ static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
465 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED), 635 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
466 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED), 636 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
467 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED), 637 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
638 DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED),
639 DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED),
640 DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED),
641 DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED),
468#endif 642#endif
469}; 643};
470 644
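
Seen from userspace, the four new ioctls take a GEM handle plus a mode/value pair (tiling) or a flags word. A rough usage sketch, assuming the drm_tegra_gem_set_tiling layout and ioctl number from the tegra_drm.h UAPI header this series extends are on the include path:

/* Sketch: marking a buffer block-linear from userspace.
 * Error handling trimmed; fd is an open /dev/dri/card* descriptor. */
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

static int set_block_linear(int fd, __u32 handle, __u32 log2_block_height)
{
	struct drm_tegra_gem_set_tiling args = {
		.handle = handle,
		.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK,
		.value = log2_block_height,	/* kernel rejects values > 5 */
	};

	return ioctl(fd, DRM_IOCTL_TEGRA_GEM_SET_TILING, &args);
}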
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 6b8fe9d86ed4..e89c70fa82d5 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -19,6 +19,8 @@
19#include <drm/drm_fb_helper.h> 19#include <drm/drm_fb_helper.h>
20#include <drm/drm_fixed.h> 20#include <drm/drm_fixed.h>
21 21
22#include "gem.h"
23
22struct reset_control; 24struct reset_control;
23 25
24struct tegra_fb { 26struct tegra_fb {
@@ -43,6 +45,8 @@ struct tegra_drm {
43#ifdef CONFIG_DRM_TEGRA_FBDEV 45#ifdef CONFIG_DRM_TEGRA_FBDEV
44 struct tegra_fbdev *fbdev; 46 struct tegra_fbdev *fbdev;
45#endif 47#endif
48
49 unsigned int pitch_align;
46}; 50};
47 51
48struct tegra_drm_client; 52struct tegra_drm_client;
@@ -160,7 +164,8 @@ struct tegra_dc_window {
160 unsigned int stride[2]; 164 unsigned int stride[2];
161 unsigned long base[3]; 165 unsigned long base[3];
162 bool bottom_up; 166 bool bottom_up;
163 bool tiled; 167
168 struct tegra_bo_tiling tiling;
164}; 169};
165 170
166/* from dc.c */ 171/* from dc.c */
@@ -279,7 +284,9 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
279struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, 284struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
280 unsigned int index); 285 unsigned int index);
281bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer); 286bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
282bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer); 287int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
288 struct tegra_bo_tiling *tiling);
289int tegra_drm_fb_prepare(struct drm_device *drm);
283int tegra_drm_fb_init(struct drm_device *drm); 290int tegra_drm_fb_init(struct drm_device *drm);
284void tegra_drm_fb_exit(struct drm_device *drm); 291void tegra_drm_fb_exit(struct drm_device *drm);
285#ifdef CONFIG_DRM_TEGRA_FBDEV 292#ifdef CONFIG_DRM_TEGRA_FBDEV
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index bd56f2affa78..f7874458926a 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -474,7 +474,8 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
474 tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL); 474 tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
475 475
476 value = tegra_dsi_readl(dsi, DSI_CONTROL); 476 value = tegra_dsi_readl(dsi, DSI_CONTROL);
477 value |= DSI_CONTROL_HS_CLK_CTRL; 477 if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
478 value |= DSI_CONTROL_HS_CLK_CTRL;
478 value &= ~DSI_CONTROL_TX_TRIG(3); 479 value &= ~DSI_CONTROL_TX_TRIG(3);
479 value &= ~DSI_CONTROL_DCS_ENABLE; 480 value &= ~DSI_CONTROL_DCS_ENABLE;
480 value |= DSI_CONTROL_VIDEO_ENABLE; 481 value |= DSI_CONTROL_VIDEO_ENABLE;
@@ -982,6 +983,7 @@ static const struct of_device_id tegra_dsi_of_match[] = {
982 { .compatible = "nvidia,tegra114-dsi", }, 983 { .compatible = "nvidia,tegra114-dsi", },
983 { }, 984 { },
984}; 985};
986MODULE_DEVICE_TABLE(of, tegra_dsi_of_match);
985 987
986struct platform_driver tegra_dsi_driver = { 988struct platform_driver tegra_dsi_driver = {
987 .driver = { 989 .driver = {
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 9798a7080322..3513d12d5aa1 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -46,14 +46,15 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
46 return false; 46 return false;
47} 47}
48 48
49bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer) 49int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
50 struct tegra_bo_tiling *tiling)
50{ 51{
51 struct tegra_fb *fb = to_tegra_fb(framebuffer); 52 struct tegra_fb *fb = to_tegra_fb(framebuffer);
52 53
53 if (fb->planes[0]->flags & TEGRA_BO_TILED) 54 /* TODO: handle YUV formats? */
54 return true; 55 *tiling = fb->planes[0]->tiling;
55 56
56 return false; 57 return 0;
57} 58}
58 59
59static void tegra_fb_destroy(struct drm_framebuffer *framebuffer) 60static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
@@ -193,6 +194,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
193 struct drm_fb_helper_surface_size *sizes) 194 struct drm_fb_helper_surface_size *sizes)
194{ 195{
195 struct tegra_fbdev *fbdev = to_tegra_fbdev(helper); 196 struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
197 struct tegra_drm *tegra = helper->dev->dev_private;
196 struct drm_device *drm = helper->dev; 198 struct drm_device *drm = helper->dev;
197 struct drm_mode_fb_cmd2 cmd = { 0 }; 199 struct drm_mode_fb_cmd2 cmd = { 0 };
198 unsigned int bytes_per_pixel; 200 unsigned int bytes_per_pixel;
@@ -207,7 +209,8 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
207 209
208 cmd.width = sizes->surface_width; 210 cmd.width = sizes->surface_width;
209 cmd.height = sizes->surface_height; 211 cmd.height = sizes->surface_height;
210 cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; 212 cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel,
213 tegra->pitch_align);
211 cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 214 cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
212 sizes->surface_depth); 215 sizes->surface_depth);
213 216
@@ -267,18 +270,13 @@ release:
267 return err; 270 return err;
268} 271}
269 272
270static struct drm_fb_helper_funcs tegra_fb_helper_funcs = { 273static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
271 .fb_probe = tegra_fbdev_probe, 274 .fb_probe = tegra_fbdev_probe,
272}; 275};
273 276
274static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm, 277static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
275 unsigned int preferred_bpp,
276 unsigned int num_crtc,
277 unsigned int max_connectors)
278{ 278{
279 struct drm_fb_helper *helper;
280 struct tegra_fbdev *fbdev; 279 struct tegra_fbdev *fbdev;
281 int err;
282 280
283 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); 281 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
284 if (!fbdev) { 282 if (!fbdev) {
@@ -286,13 +284,23 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
286 return ERR_PTR(-ENOMEM); 284 return ERR_PTR(-ENOMEM);
287 } 285 }
288 286
289 fbdev->base.funcs = &tegra_fb_helper_funcs; 287 drm_fb_helper_prepare(drm, &fbdev->base, &tegra_fb_helper_funcs);
290 helper = &fbdev->base; 288
289 return fbdev;
290}
291
292static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
293 unsigned int preferred_bpp,
294 unsigned int num_crtc,
295 unsigned int max_connectors)
296{
297 struct drm_device *drm = fbdev->base.dev;
298 int err;
291 299
292 err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors); 300 err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
293 if (err < 0) { 301 if (err < 0) {
294 dev_err(drm->dev, "failed to initialize DRM FB helper\n"); 302 dev_err(drm->dev, "failed to initialize DRM FB helper\n");
295 goto free; 303 return err;
296 } 304 }
297 305
298 err = drm_fb_helper_single_add_all_connectors(&fbdev->base); 306 err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
@@ -301,21 +309,17 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
301 goto fini; 309 goto fini;
302 } 310 }
303 311
304 drm_helper_disable_unused_functions(drm);
305
306 err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp); 312 err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
307 if (err < 0) { 313 if (err < 0) {
308 dev_err(drm->dev, "failed to set initial configuration\n"); 314 dev_err(drm->dev, "failed to set initial configuration\n");
309 goto fini; 315 goto fini;
310 } 316 }
311 317
312 return fbdev; 318 return 0;
313 319
314fini: 320fini:
315 drm_fb_helper_fini(&fbdev->base); 321 drm_fb_helper_fini(&fbdev->base);
316free: 322 return err;
317 kfree(fbdev);
318 return ERR_PTR(err);
319} 323}
320 324
321static void tegra_fbdev_free(struct tegra_fbdev *fbdev) 325static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
@@ -366,7 +370,7 @@ static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
366#endif 370#endif
367}; 371};
368 372
369int tegra_drm_fb_init(struct drm_device *drm) 373int tegra_drm_fb_prepare(struct drm_device *drm)
370{ 374{
371#ifdef CONFIG_DRM_TEGRA_FBDEV 375#ifdef CONFIG_DRM_TEGRA_FBDEV
372 struct tegra_drm *tegra = drm->dev_private; 376 struct tegra_drm *tegra = drm->dev_private;
@@ -381,8 +385,7 @@ int tegra_drm_fb_init(struct drm_device *drm)
381 drm->mode_config.funcs = &tegra_drm_mode_funcs; 385 drm->mode_config.funcs = &tegra_drm_mode_funcs;
382 386
383#ifdef CONFIG_DRM_TEGRA_FBDEV 387#ifdef CONFIG_DRM_TEGRA_FBDEV
384 tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc, 388 tegra->fbdev = tegra_fbdev_create(drm);
385 drm->mode_config.num_connector);
386 if (IS_ERR(tegra->fbdev)) 389 if (IS_ERR(tegra->fbdev))
387 return PTR_ERR(tegra->fbdev); 390 return PTR_ERR(tegra->fbdev);
388#endif 391#endif
@@ -390,6 +393,21 @@ int tegra_drm_fb_init(struct drm_device *drm)
390 return 0; 393 return 0;
391} 394}
392 395
396int tegra_drm_fb_init(struct drm_device *drm)
397{
398#ifdef CONFIG_DRM_TEGRA_FBDEV
399 struct tegra_drm *tegra = drm->dev_private;
400 int err;
401
402 err = tegra_fbdev_init(tegra->fbdev, 32, drm->mode_config.num_crtc,
403 drm->mode_config.num_connector);
404 if (err < 0)
405 return err;
406#endif
407
408 return 0;
409}
410
393void tegra_drm_fb_exit(struct drm_device *drm) 411void tegra_drm_fb_exit(struct drm_device *drm)
394{ 412{
395#ifdef CONFIG_DRM_TEGRA_FBDEV 413#ifdef CONFIG_DRM_TEGRA_FBDEV
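
The net effect of this split is a two-stage fbdev bring-up: the helper is allocated and the mode_config funcs installed before the host1x clients probe, and the initial configuration only runs once outputs exist. The resulting load order, condensed into a sketch (not verbatim driver code):

/* Condensed tegra_drm_load() ordering after this change (sketch): */
static int tegra_drm_load_order(struct drm_device *drm,
				struct host1x_device *device)
{
	int err;

	drm_mode_config_init(drm);

	err = tegra_drm_fb_prepare(drm);	/* alloc fb helper, set mode_config.funcs */
	if (err < 0)
		return err;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);	/* probes DCs/outputs, registers connectors */
	if (err < 0)
		return err;

	/* ... vblank setup etc. ... */

	return tegra_drm_fb_init(drm);		/* scan connectors, create initial fb */
}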
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 78cc8143760a..ce023fa3e8ae 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -16,6 +16,7 @@
16#include <linux/dma-buf.h> 16#include <linux/dma-buf.h>
17#include <drm/tegra_drm.h> 17#include <drm/tegra_drm.h>
18 18
19#include "drm.h"
19#include "gem.h" 20#include "gem.h"
20 21
21static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo) 22static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
@@ -126,7 +127,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
126 goto err_mmap; 127 goto err_mmap;
127 128
128 if (flags & DRM_TEGRA_GEM_CREATE_TILED) 129 if (flags & DRM_TEGRA_GEM_CREATE_TILED)
129 bo->flags |= TEGRA_BO_TILED; 130 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
130 131
131 if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP) 132 if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
132 bo->flags |= TEGRA_BO_BOTTOM_UP; 133 bo->flags |= TEGRA_BO_BOTTOM_UP;
@@ -259,8 +260,10 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
259 struct drm_mode_create_dumb *args) 260 struct drm_mode_create_dumb *args)
260{ 261{
261 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 262 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
263 struct tegra_drm *tegra = drm->dev_private;
262 struct tegra_bo *bo; 264 struct tegra_bo *bo;
263 265
266 min_pitch = round_up(min_pitch, tegra->pitch_align);
264 if (args->pitch < min_pitch) 267 if (args->pitch < min_pitch)
265 args->pitch = min_pitch; 268 args->pitch = min_pitch;
266 269
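
With pitch_align now enforced in the dumb-buffer path, pitches are rounded up to the largest alignment any display controller on the SoC requires (64 bytes on Tegra114/124, 8 on Tegra20/30, per the soc_info tables above). A quick worked example of the arithmetic:

/* Worked example of the rounding above: a 1921x1080 XRGB8888 dumb
 * buffer on Tegra124 (pitch_align = 64). */
int min_pitch = DIV_ROUND_UP(1921 * 32, 8);	/* = 7684 bytes */
min_pitch = round_up(min_pitch, 64);		/* = 7744 bytes */
/* A 1920-wide buffer needs no rounding: 7680 is already 64-aligned. */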
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 2f3fe96c5154..43a25c853357 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -16,8 +16,18 @@
16#include <drm/drm.h> 16#include <drm/drm.h>
17#include <drm/drmP.h> 17#include <drm/drmP.h>
18 18
19#define TEGRA_BO_TILED (1 << 0) 19#define TEGRA_BO_BOTTOM_UP (1 << 0)
20#define TEGRA_BO_BOTTOM_UP (1 << 1) 20
21enum tegra_bo_tiling_mode {
22 TEGRA_BO_TILING_MODE_PITCH,
23 TEGRA_BO_TILING_MODE_TILED,
24 TEGRA_BO_TILING_MODE_BLOCK,
25};
26
27struct tegra_bo_tiling {
28 enum tegra_bo_tiling_mode mode;
29 unsigned long value;
30};
21 31
22struct tegra_bo { 32struct tegra_bo {
23 struct drm_gem_object gem; 33 struct drm_gem_object gem;
@@ -26,6 +36,8 @@ struct tegra_bo {
26 struct sg_table *sgt; 36 struct sg_table *sgt;
27 dma_addr_t paddr; 37 dma_addr_t paddr;
28 void *vaddr; 38 void *vaddr;
39
40 struct tegra_bo_tiling tiling;
29}; 41};
30 42
31static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem) 43static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 7c53941f2a9e..02cd3e37a6ec 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -121,6 +121,7 @@ static const struct of_device_id gr2d_match[] = {
121 { .compatible = "nvidia,tegra20-gr2d" }, 121 { .compatible = "nvidia,tegra20-gr2d" },
122 { }, 122 { },
123}; 123};
124MODULE_DEVICE_TABLE(of, gr2d_match);
124 125
125static const u32 gr2d_addr_regs[] = { 126static const u32 gr2d_addr_regs[] = {
126 GR2D_UA_BASE_ADDR, 127 GR2D_UA_BASE_ADDR,
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 30f5ba9bd6d0..2bea2b2d204e 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -130,6 +130,7 @@ static const struct of_device_id tegra_gr3d_match[] = {
130 { .compatible = "nvidia,tegra20-gr3d" }, 130 { .compatible = "nvidia,tegra20-gr3d" },
131 { } 131 { }
132}; 132};
133MODULE_DEVICE_TABLE(of, tegra_gr3d_match);
133 134
134static const u32 gr3d_addr_regs[] = { 135static const u32 gr3d_addr_regs[] = {
135 GR3D_IDX_ATTRIBUTE( 0), 136 GR3D_IDX_ATTRIBUTE( 0),
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index ba067bb767e3..ffe26547328d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1450,6 +1450,7 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
1450 { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config }, 1450 { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
1451 { }, 1451 { },
1452}; 1452};
1453MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
1453 1454
1454static int tegra_hdmi_probe(struct platform_device *pdev) 1455static int tegra_hdmi_probe(struct platform_device *pdev)
1455{ 1456{
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index a3e4f1eca6f7..0c67d7eebc94 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -105,7 +105,7 @@ static void drm_connector_clear(struct drm_connector *connector)
105 105
106static void tegra_connector_destroy(struct drm_connector *connector) 106static void tegra_connector_destroy(struct drm_connector *connector)
107{ 107{
108 drm_sysfs_connector_remove(connector); 108 drm_connector_unregister(connector);
109 drm_connector_cleanup(connector); 109 drm_connector_cleanup(connector);
110 drm_connector_clear(connector); 110 drm_connector_clear(connector);
111} 111}
@@ -140,7 +140,9 @@ static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
140 if (mode != DRM_MODE_DPMS_ON) { 140 if (mode != DRM_MODE_DPMS_ON) {
141 drm_panel_disable(panel); 141 drm_panel_disable(panel);
142 tegra_output_disable(output); 142 tegra_output_disable(output);
143 drm_panel_unprepare(panel);
143 } else { 144 } else {
145 drm_panel_prepare(panel);
144 tegra_output_enable(output); 146 tegra_output_enable(output);
145 drm_panel_enable(panel); 147 drm_panel_enable(panel);
146 } 148 }
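
This hooks the new drm_panel .prepare()/.unprepare() callbacks (added earlier in this series) into DPMS so that power sequencing brackets the output: the panel is prepared before the encoder comes up and unprepared only after it is down. Schematically:

/* Resulting DPMS power sequencing (sketch of the code above): */
if (mode == DRM_MODE_DPMS_ON) {
	drm_panel_prepare(panel);	/* power rails up, reset deasserted */
	tegra_output_enable(output);	/* start scanout signal */
	drm_panel_enable(panel);	/* backlight on, unblank */
} else {
	drm_panel_disable(panel);	/* backlight off */
	tegra_output_disable(output);	/* stop scanout signal */
	drm_panel_unprepare(panel);	/* power rails down */
}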
@@ -318,7 +320,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
318 drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); 320 drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
319 321
320 drm_mode_connector_attach_encoder(&output->connector, &output->encoder); 322 drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
321 drm_sysfs_connector_add(&output->connector); 323 drm_connector_register(&output->connector);
322 324
323 output->encoder.possible_crtcs = 0x3; 325 output->encoder.possible_crtcs = 0x3;
324 326
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 27c979b50111..0410e467b828 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -516,7 +516,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
516 if (err < 0) { 516 if (err < 0) {
517 dev_err(sor->dev, "failed to probe eDP link: %d\n", 517 dev_err(sor->dev, "failed to probe eDP link: %d\n",
518 err); 518 err);
519 return err; 519 goto unlock;
520 } 520 }
521 } 521 }
522 522
@@ -525,7 +525,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
525 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); 525 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
526 526
527 memset(&config, 0, sizeof(config)); 527 memset(&config, 0, sizeof(config));
528 config.bits_per_pixel = 24; /* XXX: don't hardcode? */ 528 config.bits_per_pixel = output->connector.display_info.bpc * 3;
529 529
530 err = tegra_sor_calc_config(sor, mode, &config, &link); 530 err = tegra_sor_calc_config(sor, mode, &config, &link);
531 if (err < 0) 531 if (err < 0)
@@ -815,12 +815,22 @@ static int tegra_output_sor_enable(struct tegra_output *output)
815 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete 815 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
816 * raster, associate with display controller) 816 * raster, associate with display controller)
817 */ 817 */
818 value = SOR_STATE_ASY_VSYNCPOL | 818 value = SOR_STATE_ASY_PROTOCOL_DP_A |
819 SOR_STATE_ASY_HSYNCPOL |
820 SOR_STATE_ASY_PROTOCOL_DP_A |
821 SOR_STATE_ASY_CRC_MODE_COMPLETE | 819 SOR_STATE_ASY_CRC_MODE_COMPLETE |
822 SOR_STATE_ASY_OWNER(dc->pipe + 1); 820 SOR_STATE_ASY_OWNER(dc->pipe + 1);
823 821
822 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
823 value &= ~SOR_STATE_ASY_HSYNCPOL;
824
825 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
826 value |= SOR_STATE_ASY_HSYNCPOL;
827
828 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
829 value &= ~SOR_STATE_ASY_VSYNCPOL;
830
831 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
832 value |= SOR_STATE_ASY_VSYNCPOL;
833
824 switch (config.bits_per_pixel) { 834 switch (config.bits_per_pixel) {
825 case 24: 835 case 24:
826 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444; 836 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
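
Note the inverted sense of the SOR polarity bits, which the hunk above encodes; summarized:

/* Polarity bit sense (sketch): the ASY_*SYNCPOL bits mean "negative".
 *
 *   DRM_MODE_FLAG_PHSYNC -> SOR_STATE_ASY_HSYNCPOL cleared
 *   DRM_MODE_FLAG_NHSYNC -> SOR_STATE_ASY_HSYNCPOL set
 *   DRM_MODE_FLAG_PVSYNC -> SOR_STATE_ASY_VSYNCPOL cleared
 *   DRM_MODE_FLAG_NVSYNC -> SOR_STATE_ASY_VSYNCPOL set
 *
 * bits_per_pixel is likewise now derived from the sink: bpc 8 -> 24 bpp. */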
@@ -1455,6 +1465,7 @@ static const struct of_device_id tegra_sor_of_match[] = {
1455 { .compatible = "nvidia,tegra124-sor", }, 1465 { .compatible = "nvidia,tegra124-sor", },
1456 { }, 1466 { },
1457}; 1467};
1468MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
1458 1469
1459struct platform_driver tegra_sor_driver = { 1470struct platform_driver tegra_sor_driver = {
1460 .driver = { 1471 .driver = {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index b20b69488dc9..6be623b4a86f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -120,8 +120,8 @@ static int cpufreq_transition(struct notifier_block *nb,
120static int tilcdc_unload(struct drm_device *dev) 120static int tilcdc_unload(struct drm_device *dev)
121{ 121{
122 struct tilcdc_drm_private *priv = dev->dev_private; 122 struct tilcdc_drm_private *priv = dev->dev_private;
123 struct tilcdc_module *mod, *cur;
124 123
124 drm_fbdev_cma_fini(priv->fbdev);
125 drm_kms_helper_poll_fini(dev); 125 drm_kms_helper_poll_fini(dev);
126 drm_mode_config_cleanup(dev); 126 drm_mode_config_cleanup(dev);
127 drm_vblank_cleanup(dev); 127 drm_vblank_cleanup(dev);
@@ -148,11 +148,6 @@ static int tilcdc_unload(struct drm_device *dev)
148 148
149 pm_runtime_disable(dev->dev); 149 pm_runtime_disable(dev->dev);
150 150
151 list_for_each_entry_safe(mod, cur, &module_list, list) {
152 DBG("destroying module: %s", mod->name);
153 mod->funcs->destroy(mod);
154 }
155
156 kfree(priv); 151 kfree(priv);
157 152
158 return 0; 153 return 0;
@@ -628,13 +623,13 @@ static int __init tilcdc_drm_init(void)
628static void __exit tilcdc_drm_fini(void) 623static void __exit tilcdc_drm_fini(void)
629{ 624{
630 DBG("fini"); 625 DBG("fini");
631 tilcdc_tfp410_fini();
632 tilcdc_slave_fini();
633 tilcdc_panel_fini();
634 platform_driver_unregister(&tilcdc_platform_driver); 626 platform_driver_unregister(&tilcdc_platform_driver);
627 tilcdc_panel_fini();
628 tilcdc_slave_fini();
629 tilcdc_tfp410_fini();
635} 630}
636 631
637late_initcall(tilcdc_drm_init); 632module_init(tilcdc_drm_init);
638module_exit(tilcdc_drm_fini); 633module_exit(tilcdc_drm_fini);
639 634
640 MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>"); 635 MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 093803683b25..7596c144a9fb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -98,7 +98,6 @@ struct tilcdc_module;
98struct tilcdc_module_ops { 98struct tilcdc_module_ops {
99 /* create appropriate encoders/connectors: */ 99 /* create appropriate encoders/connectors: */
100 int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev); 100 int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev);
101 void (*destroy)(struct tilcdc_module *mod);
102#ifdef CONFIG_DEBUG_FS 101#ifdef CONFIG_DEBUG_FS
103 /* create debugfs nodes (can be NULL): */ 102 /* create debugfs nodes (can be NULL): */
104 int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor); 103 int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 86c67329b605..4c7aa1d8134f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -151,6 +151,7 @@ struct panel_connector {
151static void panel_connector_destroy(struct drm_connector *connector) 151static void panel_connector_destroy(struct drm_connector *connector)
152{ 152{
153 struct panel_connector *panel_connector = to_panel_connector(connector); 153 struct panel_connector *panel_connector = to_panel_connector(connector);
154 drm_connector_unregister(connector);
154 drm_connector_cleanup(connector); 155 drm_connector_cleanup(connector);
155 kfree(panel_connector); 156 kfree(panel_connector);
156} 157}
@@ -247,7 +248,7 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
247 if (ret) 248 if (ret)
248 goto fail; 249 goto fail;
249 250
250 drm_sysfs_connector_add(connector); 251 drm_connector_register(connector);
251 252
252 return connector; 253 return connector;
253 254
@@ -281,23 +282,8 @@ static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
281 return 0; 282 return 0;
282} 283}
283 284
284static void panel_destroy(struct tilcdc_module *mod)
285{
286 struct panel_module *panel_mod = to_panel_module(mod);
287
288 if (panel_mod->timings) {
289 display_timings_release(panel_mod->timings);
290 kfree(panel_mod->timings);
291 }
292
293 tilcdc_module_cleanup(mod);
294 kfree(panel_mod->info);
295 kfree(panel_mod);
296}
297
298static const struct tilcdc_module_ops panel_module_ops = { 285static const struct tilcdc_module_ops panel_module_ops = {
299 .modeset_init = panel_modeset_init, 286 .modeset_init = panel_modeset_init,
300 .destroy = panel_destroy,
301}; 287};
302 288
303/* 289/*
@@ -373,6 +359,7 @@ static int panel_probe(struct platform_device *pdev)
373 return -ENOMEM; 359 return -ENOMEM;
374 360
375 mod = &panel_mod->base; 361 mod = &panel_mod->base;
362 pdev->dev.platform_data = mod;
376 363
377 tilcdc_module_init(mod, "panel", &panel_module_ops); 364 tilcdc_module_init(mod, "panel", &panel_module_ops);
378 365
@@ -380,17 +367,16 @@ static int panel_probe(struct platform_device *pdev)
380 if (IS_ERR(pinctrl)) 367 if (IS_ERR(pinctrl))
381 dev_warn(&pdev->dev, "pins are not configured\n"); 368 dev_warn(&pdev->dev, "pins are not configured\n");
382 369
383
384 panel_mod->timings = of_get_display_timings(node); 370 panel_mod->timings = of_get_display_timings(node);
385 if (!panel_mod->timings) { 371 if (!panel_mod->timings) {
386 dev_err(&pdev->dev, "could not get panel timings\n"); 372 dev_err(&pdev->dev, "could not get panel timings\n");
387 goto fail; 373 goto fail_free;
388 } 374 }
389 375
390 panel_mod->info = of_get_panel_info(node); 376 panel_mod->info = of_get_panel_info(node);
391 if (!panel_mod->info) { 377 if (!panel_mod->info) {
392 dev_err(&pdev->dev, "could not get panel info\n"); 378 dev_err(&pdev->dev, "could not get panel info\n");
393 goto fail; 379 goto fail_timings;
394 } 380 }
395 381
396 mod->preferred_bpp = panel_mod->info->bpp; 382 mod->preferred_bpp = panel_mod->info->bpp;
@@ -401,13 +387,26 @@ static int panel_probe(struct platform_device *pdev)
401 387
402 return 0; 388 return 0;
403 389
404fail: 390fail_timings:
405 panel_destroy(mod); 391 display_timings_release(panel_mod->timings);
392
393fail_free:
394 tilcdc_module_cleanup(mod);
395 kfree(panel_mod);
406 return ret; 396 return ret;
407} 397}
408 398
409static int panel_remove(struct platform_device *pdev) 399static int panel_remove(struct platform_device *pdev)
410{ 400{
401 struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
402 struct panel_module *panel_mod = to_panel_module(mod);
403
404 display_timings_release(panel_mod->timings);
405
406 tilcdc_module_cleanup(mod);
407 kfree(panel_mod->info);
408 kfree(panel_mod);
409
411 return 0; 410 return 0;
412} 411}
413 412
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index 595068ba2d5e..3775fd49dac4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -166,6 +166,7 @@ struct slave_connector {
166static void slave_connector_destroy(struct drm_connector *connector) 166static void slave_connector_destroy(struct drm_connector *connector)
167{ 167{
168 struct slave_connector *slave_connector = to_slave_connector(connector); 168 struct slave_connector *slave_connector = to_slave_connector(connector);
169 drm_connector_unregister(connector);
169 drm_connector_cleanup(connector); 170 drm_connector_cleanup(connector);
170 kfree(slave_connector); 171 kfree(slave_connector);
171} 172}
@@ -261,7 +262,7 @@ static struct drm_connector *slave_connector_create(struct drm_device *dev,
261 if (ret) 262 if (ret)
262 goto fail; 263 goto fail;
263 264
264 drm_sysfs_connector_add(connector); 265 drm_connector_register(connector);
265 266
266 return connector; 267 return connector;
267 268
@@ -295,17 +296,8 @@ static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
295 return 0; 296 return 0;
296} 297}
297 298
298static void slave_destroy(struct tilcdc_module *mod)
299{
300 struct slave_module *slave_mod = to_slave_module(mod);
301
302 tilcdc_module_cleanup(mod);
303 kfree(slave_mod);
304}
305
306static const struct tilcdc_module_ops slave_module_ops = { 299static const struct tilcdc_module_ops slave_module_ops = {
307 .modeset_init = slave_modeset_init, 300 .modeset_init = slave_modeset_init,
308 .destroy = slave_destroy,
309}; 301};
310 302
311/* 303/*
@@ -355,10 +347,13 @@ static int slave_probe(struct platform_device *pdev)
355 } 347 }
356 348
357 slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL); 349 slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
358 if (!slave_mod) 350 if (!slave_mod) {
359 return -ENOMEM; 351 ret = -ENOMEM;
352 goto fail_adapter;
353 }
360 354
361 mod = &slave_mod->base; 355 mod = &slave_mod->base;
356 pdev->dev.platform_data = mod;
362 357
363 mod->preferred_bpp = slave_info.bpp; 358 mod->preferred_bpp = slave_info.bpp;
364 359
@@ -373,10 +368,20 @@ static int slave_probe(struct platform_device *pdev)
373 tilcdc_slave_probedefer(false); 368 tilcdc_slave_probedefer(false);
374 369
375 return 0; 370 return 0;
371
372fail_adapter:
373 i2c_put_adapter(slavei2c);
374 return ret;
376} 375}
377 376
378static int slave_remove(struct platform_device *pdev) 377static int slave_remove(struct platform_device *pdev)
379{ 378{
379 struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
380 struct slave_module *slave_mod = to_slave_module(mod);
381
382 tilcdc_module_cleanup(mod);
383 kfree(slave_mod);
384
380 return 0; 385 return 0;
381} 386}
382 387
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index c38b56b268ac..354c47ca6374 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -167,6 +167,7 @@ struct tfp410_connector {
167static void tfp410_connector_destroy(struct drm_connector *connector) 167static void tfp410_connector_destroy(struct drm_connector *connector)
168{ 168{
169 struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); 169 struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
170 drm_connector_unregister(connector);
170 drm_connector_cleanup(connector); 171 drm_connector_cleanup(connector);
171 kfree(tfp410_connector); 172 kfree(tfp410_connector);
172} 173}
@@ -261,7 +262,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
261 if (ret) 262 if (ret)
262 goto fail; 263 goto fail;
263 264
264 drm_sysfs_connector_add(connector); 265 drm_connector_register(connector);
265 266
266 return connector; 267 return connector;
267 268
@@ -295,23 +296,8 @@ static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev
295 return 0; 296 return 0;
296} 297}
297 298
298static void tfp410_destroy(struct tilcdc_module *mod)
299{
300 struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
301
302 if (tfp410_mod->i2c)
303 i2c_put_adapter(tfp410_mod->i2c);
304
305 if (!IS_ERR_VALUE(tfp410_mod->gpio))
306 gpio_free(tfp410_mod->gpio);
307
308 tilcdc_module_cleanup(mod);
309 kfree(tfp410_mod);
310}
311
312static const struct tilcdc_module_ops tfp410_module_ops = { 299static const struct tilcdc_module_ops tfp410_module_ops = {
313 .modeset_init = tfp410_modeset_init, 300 .modeset_init = tfp410_modeset_init,
314 .destroy = tfp410_destroy,
315}; 301};
316 302
317/* 303/*
@@ -341,6 +327,7 @@ static int tfp410_probe(struct platform_device *pdev)
341 return -ENOMEM; 327 return -ENOMEM;
342 328
343 mod = &tfp410_mod->base; 329 mod = &tfp410_mod->base;
330 pdev->dev.platform_data = mod;
344 331
345 tilcdc_module_init(mod, "tfp410", &tfp410_module_ops); 332 tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
346 333
@@ -364,6 +351,7 @@ static int tfp410_probe(struct platform_device *pdev)
364 tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node); 351 tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
365 if (!tfp410_mod->i2c) { 352 if (!tfp410_mod->i2c) {
366 dev_err(&pdev->dev, "could not get i2c\n"); 353 dev_err(&pdev->dev, "could not get i2c\n");
354 of_node_put(i2c_node);
367 goto fail; 355 goto fail;
368 } 356 }
369 357
@@ -377,19 +365,32 @@ static int tfp410_probe(struct platform_device *pdev)
377 ret = gpio_request(tfp410_mod->gpio, "DVI_PDn"); 365 ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
378 if (ret) { 366 if (ret) {
379 dev_err(&pdev->dev, "could not get DVI_PDn gpio\n"); 367 dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
380 goto fail; 368 goto fail_adapter;
381 } 369 }
382 } 370 }
383 371
384 return 0; 372 return 0;
385 373
374fail_adapter:
375 i2c_put_adapter(tfp410_mod->i2c);
376
386fail: 377fail:
387 tfp410_destroy(mod); 378 tilcdc_module_cleanup(mod);
379 kfree(tfp410_mod);
388 return ret; 380 return ret;
389} 381}
390 382
391static int tfp410_remove(struct platform_device *pdev) 383static int tfp410_remove(struct platform_device *pdev)
392{ 384{
385 struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
386 struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
387
388 i2c_put_adapter(tfp410_mod->i2c);
389 gpio_free(tfp410_mod->gpio);
390
391 tilcdc_module_cleanup(mod);
392 kfree(tfp410_mod);
393
393 return 0; 394 return 0;
394} 395}
395 396
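
The pattern repeated across the panel, slave and tfp410 modules above: drop the tilcdc_module_ops .destroy hook, stash the module pointer in pdev->dev.platform_data at probe time, and move teardown into the platform driver's .remove(), where it can be retrieved again. In outline (names with "foo" are hypothetical):

/* Sketch of the new tilcdc submodule lifetime, per the diffs above. */
static int foo_probe(struct platform_device *pdev)
{
	struct foo_module *foo_mod = kzalloc(sizeof(*foo_mod), GFP_KERNEL);

	if (!foo_mod)
		return -ENOMEM;

	pdev->dev.platform_data = &foo_mod->base;	/* remember for remove() */
	tilcdc_module_init(&foo_mod->base, "foo", &foo_module_ops);
	/* ... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
	struct foo_module *foo_mod = to_foo_module(mod);

	tilcdc_module_cleanup(mod);	/* unlink before freeing */
	kfree(foo_mod);
	return 0;
}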
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4ab9f7171c4f..3da89d5dab60 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
412 int ret; 412 int ret;
413 413
414 spin_lock(&glob->lru_lock); 414 spin_lock(&glob->lru_lock);
415 ret = __ttm_bo_reserve(bo, false, true, false, 0); 415 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
416 416
417 spin_lock(&bdev->fence_lock); 417 spin_lock(&bdev->fence_lock);
418 (void) ttm_bo_wait(bo, false, false, true); 418 (void) ttm_bo_wait(bo, false, false, true);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
514 return ret; 514 return ret;
515 515
516 spin_lock(&glob->lru_lock); 516 spin_lock(&glob->lru_lock);
517 ret = __ttm_bo_reserve(bo, false, true, false, 0); 517 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
518 518
519 /* 519 /*
520 * We raced, and lost, someone else holds the reservation now, 520 * We raced, and lost, someone else holds the reservation now,
@@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
577 kref_get(&nentry->list_kref); 577 kref_get(&nentry->list_kref);
578 } 578 }
579 579
580 ret = __ttm_bo_reserve(entry, false, true, false, 0); 580 ret = __ttm_bo_reserve(entry, false, true, false, NULL);
581 if (remove_all && ret) { 581 if (remove_all && ret) {
582 spin_unlock(&glob->lru_lock); 582 spin_unlock(&glob->lru_lock);
583 ret = __ttm_bo_reserve(entry, false, false, 583 ret = __ttm_bo_reserve(entry, false, false,
584 false, 0); 584 false, NULL);
585 spin_lock(&glob->lru_lock); 585 spin_lock(&glob->lru_lock);
586 } 586 }
587 587
@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
726 726
727 spin_lock(&glob->lru_lock); 727 spin_lock(&glob->lru_lock);
728 list_for_each_entry(bo, &man->lru, lru) { 728 list_for_each_entry(bo, &man->lru, lru) {
729 ret = __ttm_bo_reserve(bo, false, true, false, 0); 729 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
730 if (!ret) 730 if (!ret)
731 break; 731 break;
732 } 732 }
@@ -784,7 +784,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
784 int ret; 784 int ret;
785 785
786 do { 786 do {
787 ret = (*man->func->get_node)(man, bo, placement, mem); 787 ret = (*man->func->get_node)(man, bo, placement, 0, mem);
788 if (unlikely(ret != 0)) 788 if (unlikely(ret != 0))
789 return ret; 789 return ret;
790 if (mem->mm_node) 790 if (mem->mm_node)
@@ -897,7 +897,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
897 897
898 if (man->has_type && man->use_type) { 898 if (man->has_type && man->use_type) {
899 type_found = true; 899 type_found = true;
900 ret = (*man->func->get_node)(man, bo, placement, mem); 900 ret = (*man->func->get_node)(man, bo, placement,
901 cur_flags, mem);
901 if (unlikely(ret)) 902 if (unlikely(ret))
902 return ret; 903 return ret;
903 } 904 }
@@ -937,7 +938,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
937 ttm_flag_masked(&cur_flags, placement->busy_placement[i], 938 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
938 ~TTM_PL_MASK_MEMTYPE); 939 ~TTM_PL_MASK_MEMTYPE);
939 940
940
941 if (mem_type == TTM_PL_SYSTEM) { 941 if (mem_type == TTM_PL_SYSTEM) {
942 mem->mem_type = mem_type; 942 mem->mem_type = mem_type;
943 mem->placement = cur_flags; 943 mem->placement = cur_flags;
@@ -1595,7 +1595,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1595 * Using ttm_bo_reserve makes sure the lru lists are updated. 1595 * Using ttm_bo_reserve makes sure the lru lists are updated.
1596 */ 1596 */
1597 1597
1598 ret = ttm_bo_reserve(bo, true, no_wait, false, 0); 1598 ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
1599 if (unlikely(ret != 0)) 1599 if (unlikely(ret != 0))
1600 return ret; 1600 return ret;
1601 spin_lock(&bdev->fence_lock); 1601 spin_lock(&bdev->fence_lock);
@@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1630 1630
1631 spin_lock(&glob->lru_lock); 1631 spin_lock(&glob->lru_lock);
1632 list_for_each_entry(bo, &glob->swap_lru, swap) { 1632 list_for_each_entry(bo, &glob->swap_lru, swap) {
1633 ret = __ttm_bo_reserve(bo, false, true, false, 0); 1633 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
1634 if (!ret) 1634 if (!ret)
1635 break; 1635 break;
1636 } 1636 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index bd850c9f4bca..9e103a4875c8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -50,6 +50,7 @@ struct ttm_range_manager {
50static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, 50static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
51 struct ttm_buffer_object *bo, 51 struct ttm_buffer_object *bo,
52 struct ttm_placement *placement, 52 struct ttm_placement *placement,
53 uint32_t flags,
53 struct ttm_mem_reg *mem) 54 struct ttm_mem_reg *mem)
54{ 55{
55 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; 56 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -67,7 +68,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
67 if (!node) 68 if (!node)
68 return -ENOMEM; 69 return -ENOMEM;
69 70
70 if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN) 71 if (flags & TTM_PL_FLAG_TOPDOWN)
71 aflags = DRM_MM_CREATE_TOP; 72 aflags = DRM_MM_CREATE_TOP;
72 73
73 spin_lock(&rman->lock); 74 spin_lock(&rman->lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1df856f78568..30e5d90cb7bc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -500,7 +500,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
500 pgprot_val(tmp) |= _PAGE_GUARDED; 500 pgprot_val(tmp) |= _PAGE_GUARDED;
501 } 501 }
502#endif 502#endif
503#if defined(__ia64__) 503#if defined(__ia64__) || defined(__arm__)
504 if (caching_flags & TTM_PL_FLAG_WC) 504 if (caching_flags & TTM_PL_FLAG_WC)
505 tmp = pgprot_writecombine(tmp); 505 tmp = pgprot_writecombine(tmp);
506 else 506 else
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index d7f92fe9d904..66fc6395eb54 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -35,7 +35,7 @@
35#include <drm/drm_sysfs.h> 35#include <drm/drm_sysfs.h>
36 36
37static DECLARE_WAIT_QUEUE_HEAD(exit_q); 37static DECLARE_WAIT_QUEUE_HEAD(exit_q);
38atomic_t device_released; 38static atomic_t device_released;
39 39
40static struct device_type ttm_drm_class_type = { 40static struct device_type ttm_drm_class_type = {
41 .name = "ttm", 41 .name = "ttm",
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9f9234..09874d695188 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
297 * 297 *
298 * @pool: to free the pages from 298 * @pool: to free the pages from
299 * @free_all: If set to true will free all pages in pool 299 * @free_all: If set to true will free all pages in pool
300 * @gfp: GFP flags.
300 **/ 301 **/
301static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) 302static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
303 gfp_t gfp)
302{ 304{
303 unsigned long irq_flags; 305 unsigned long irq_flags;
304 struct page *p; 306 struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
309 if (NUM_PAGES_TO_ALLOC < nr_free) 311 if (NUM_PAGES_TO_ALLOC < nr_free)
310 npages_to_free = NUM_PAGES_TO_ALLOC; 312 npages_to_free = NUM_PAGES_TO_ALLOC;
311 313
312 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), 314 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
313 GFP_KERNEL);
314 if (!pages_to_free) { 315 if (!pages_to_free) {
315 pr_err("Failed to allocate memory for pool free operation\n"); 316 pr_err("Failed to allocate memory for pool free operation\n");
316 return 0; 317 return 0;
@@ -382,32 +383,35 @@ out:
382 * 383 *
383 * XXX: (dchinner) Deadlock warning! 384 * XXX: (dchinner) Deadlock warning!
384 * 385 *
385 * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means 386 * We need to pass sc->gfp_mask to ttm_page_pool_free().
386 * this can deadlock when called a sc->gfp_mask that is not equal to
387 * GFP_KERNEL.
388 * 387 *
389 * This code is crying out for a shrinker per pool.... 388 * This code is crying out for a shrinker per pool....
390 */ 389 */
391static unsigned long 390static unsigned long
392ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 391ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
393{ 392{
394 static atomic_t start_pool = ATOMIC_INIT(0); 393 static DEFINE_MUTEX(lock);
394 static unsigned start_pool;
395 unsigned i; 395 unsigned i;
396 unsigned pool_offset = atomic_add_return(1, &start_pool); 396 unsigned pool_offset;
397 struct ttm_page_pool *pool; 397 struct ttm_page_pool *pool;
398 int shrink_pages = sc->nr_to_scan; 398 int shrink_pages = sc->nr_to_scan;
399 unsigned long freed = 0; 399 unsigned long freed = 0;
400 400
401 pool_offset = pool_offset % NUM_POOLS; 401 if (!mutex_trylock(&lock))
402 return SHRINK_STOP;
403 pool_offset = ++start_pool % NUM_POOLS;
402 /* select start pool in round robin fashion */ 404 /* select start pool in round robin fashion */
403 for (i = 0; i < NUM_POOLS; ++i) { 405 for (i = 0; i < NUM_POOLS; ++i) {
404 unsigned nr_free = shrink_pages; 406 unsigned nr_free = shrink_pages;
405 if (shrink_pages == 0) 407 if (shrink_pages == 0)
406 break; 408 break;
407 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; 409 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
408 shrink_pages = ttm_page_pool_free(pool, nr_free); 410 shrink_pages = ttm_page_pool_free(pool, nr_free,
411 sc->gfp_mask);
409 freed += nr_free - shrink_pages; 412 freed += nr_free - shrink_pages;
410 } 413 }
414 mutex_unlock(&lock);
411 return freed; 415 return freed;
412} 416}
413 417
@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
706 } 710 }
707 spin_unlock_irqrestore(&pool->lock, irq_flags); 711 spin_unlock_irqrestore(&pool->lock, irq_flags);
708 if (npages) 712 if (npages)
709 ttm_page_pool_free(pool, npages); 713 ttm_page_pool_free(pool, npages, GFP_KERNEL);
710} 714}
711 715
712/* 716/*
@@ -790,7 +794,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
790 return 0; 794 return 0;
791} 795}
792 796
793static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, 797static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
794 char *name) 798 char *name)
795{ 799{
796 spin_lock_init(&pool->lock); 800 spin_lock_init(&pool->lock);
@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
846 ttm_pool_mm_shrink_fini(_manager); 850 ttm_pool_mm_shrink_fini(_manager);
847 851
848 for (i = 0; i < NUM_POOLS; ++i) 852 for (i = 0; i < NUM_POOLS; ++i)
849 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES); 853 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
854 GFP_KERNEL);
850 855
851 kobject_put(&_manager->kobj); 856 kobject_put(&_manager->kobj);
852 _manager = NULL; 857 _manager = NULL;
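
[ Ed. The pool shrinker fixes above follow one pattern: thread the reclaim context's sc->gfp_mask down to the kmalloc() in the free path instead of hard-coding GFP_KERNEL, and serialize the scan with mutex_trylock() so a reclaim pass that re-enters the shrinker backs off instead of deadlocking — exactly the (dchinner) warning the deleted comment described. A minimal sketch of that shape, not TTM code; my_pool_free() and my_pool_lock are hypothetical: ]

#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

static DEFINE_MUTEX(my_pool_lock);

static unsigned long my_pool_free(unsigned long nr, gfp_t gfp);	/* hypothetical */

static unsigned long
my_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed;

	if (!mutex_trylock(&my_pool_lock))
		return SHRINK_STOP;	/* contended: back off, reclaim retries later */

	/* the free path allocates its scratch array with sc->gfp_mask */
	freed = my_pool_free(sc->nr_to_scan, sc->gfp_mask);

	mutex_unlock(&my_pool_lock);
	return freed;
}
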
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index fb8259f69839..ca65df144765 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
411 * 411 *
412 * @pool: to free the pages from 412 * @pool: to free the pages from
413 * @nr_free: If set to true will free all pages in pool 413 * @nr_free: If set to true will free all pages in pool
414 * @gfp: GFP flags.
414 **/ 415 **/
415static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free) 416static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
417 gfp_t gfp)
416{ 418{
417 unsigned long irq_flags; 419 unsigned long irq_flags;
418 struct dma_page *dma_p, *tmp; 420 struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
430 npages_to_free, nr_free); 432 npages_to_free, nr_free);
431 } 433 }
432#endif 434#endif
433 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), 435 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
434 GFP_KERNEL);
435 436
436 if (!pages_to_free) { 437 if (!pages_to_free) {
437 pr_err("%s: Failed to allocate memory for pool free operation\n", 438 pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
530 if (pool->type != type) 531 if (pool->type != type)
531 continue; 532 continue;
532 /* Takes a spinlock.. */ 533 /* Takes a spinlock.. */
533 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES); 534 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
534 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); 535 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
535 /* This code path is called after _all_ references to the 536 /* This code path is called after _all_ references to the
536 * struct device has been dropped - so nobody should be 537 * struct device has been dropped - so nobody should be
@@ -983,7 +984,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
983 984
984 /* shrink pool if necessary (only on !is_cached pools)*/ 985 /* shrink pool if necessary (only on !is_cached pools)*/
985 if (npages) 986 if (npages)
986 ttm_dma_page_pool_free(pool, npages); 987 ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
987 ttm->state = tt_unpopulated; 988 ttm->state = tt_unpopulated;
988} 989}
989EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); 990EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -993,10 +994,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
993 * 994 *
994 * XXX: (dchinner) Deadlock warning! 995 * XXX: (dchinner) Deadlock warning!
995 * 996 *
996 * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention 997 * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
997 * needs to be paid to sc->gfp_mask to determine if this can be done or not.
998 * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
999 * bad.
1000 * 998 *
1001 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool 999 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
1002 * shrinkers 1000 * shrinkers
@@ -1004,9 +1002,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1004static unsigned long 1002static unsigned long
1005ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 1003ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1006{ 1004{
1007 static atomic_t start_pool = ATOMIC_INIT(0); 1005 static unsigned start_pool;
1008 unsigned idx = 0; 1006 unsigned idx = 0;
1009 unsigned pool_offset = atomic_add_return(1, &start_pool); 1007 unsigned pool_offset;
1010 unsigned shrink_pages = sc->nr_to_scan; 1008 unsigned shrink_pages = sc->nr_to_scan;
1011 struct device_pools *p; 1009 struct device_pools *p;
1012 unsigned long freed = 0; 1010 unsigned long freed = 0;
@@ -1014,8 +1012,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1014 if (list_empty(&_manager->pools)) 1012 if (list_empty(&_manager->pools))
1015 return SHRINK_STOP; 1013 return SHRINK_STOP;
1016 1014
1017 mutex_lock(&_manager->lock); 1015 if (!mutex_trylock(&_manager->lock))
1018 pool_offset = pool_offset % _manager->npools; 1016 return SHRINK_STOP;
1017 if (!_manager->npools)
1018 goto out;
1019 pool_offset = ++start_pool % _manager->npools;
1019 list_for_each_entry(p, &_manager->pools, pools) { 1020 list_for_each_entry(p, &_manager->pools, pools) {
1020 unsigned nr_free; 1021 unsigned nr_free;
1021 1022
@@ -1027,13 +1028,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1027 if (++idx < pool_offset) 1028 if (++idx < pool_offset)
1028 continue; 1029 continue;
1029 nr_free = shrink_pages; 1030 nr_free = shrink_pages;
1030 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free); 1031 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
1032 sc->gfp_mask);
1031 freed += nr_free - shrink_pages; 1033 freed += nr_free - shrink_pages;
1032 1034
1033 pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", 1035 pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
1034 p->pool->dev_name, p->pool->name, current->pid, 1036 p->pool->dev_name, p->pool->name, current->pid,
1035 nr_free, shrink_pages); 1037 nr_free, shrink_pages);
1036 } 1038 }
1039out:
1037 mutex_unlock(&_manager->lock); 1040 mutex_unlock(&_manager->lock);
1038 return freed; 1041 return freed;
1039} 1042}
@@ -1044,7 +1047,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1044 struct device_pools *p; 1047 struct device_pools *p;
1045 unsigned long count = 0; 1048 unsigned long count = 0;
1046 1049
1047 mutex_lock(&_manager->lock); 1050 if (!mutex_trylock(&_manager->lock))
1051 return 0;
1048 list_for_each_entry(p, &_manager->pools, pools) 1052 list_for_each_entry(p, &_manager->pools, pools)
1049 count += p->pool->npages_free; 1053 count += p->pool->npages_free;
1050 mutex_unlock(&_manager->lock); 1054 mutex_unlock(&_manager->lock);
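
[ Ed. The DMA pool gets the same treatment on the count side: a contended count callback now reports zero rather than sleeping in reclaim. Continuing the sketch above (my_count_free_pages() is hypothetical; the real code counts npages_free under _manager->lock): ]

static unsigned long
my_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;

	if (!mutex_trylock(&my_pool_lock))	/* same lock as the scan path */
		return 0;			/* "nothing to report right now" */
	count = my_count_free_pages();
	mutex_unlock(&my_pool_lock);
	return count;
}
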
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b44d548c56f8..e026a9e2942a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -105,14 +105,7 @@ static struct drm_encoder*
105udl_best_single_encoder(struct drm_connector *connector) 105udl_best_single_encoder(struct drm_connector *connector)
106{ 106{
107 int enc_id = connector->encoder_ids[0]; 107 int enc_id = connector->encoder_ids[0];
108 struct drm_mode_object *obj; 108 return drm_encoder_find(connector->dev, enc_id);
109 struct drm_encoder *encoder;
110
111 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
112 if (!obj)
113 return NULL;
114 encoder = obj_to_encoder(obj);
115 return encoder;
116} 109}
117 110
118static int udl_connector_set_property(struct drm_connector *connector, 111static int udl_connector_set_property(struct drm_connector *connector,
@@ -124,7 +117,7 @@ static int udl_connector_set_property(struct drm_connector *connector,
124 117
125static void udl_connector_destroy(struct drm_connector *connector) 118static void udl_connector_destroy(struct drm_connector *connector)
126{ 119{
127 drm_sysfs_connector_remove(connector); 120 drm_connector_unregister(connector);
128 drm_connector_cleanup(connector); 121 drm_connector_cleanup(connector);
129 kfree(connector); 122 kfree(connector);
130} 123}
@@ -154,7 +147,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
154 drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII); 147 drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
155 drm_connector_helper_add(connector, &udl_connector_helper_funcs); 148 drm_connector_helper_add(connector, &udl_connector_helper_funcs);
156 149
157 drm_sysfs_connector_add(connector); 150 drm_connector_register(connector);
158 drm_mode_connector_attach_encoder(connector, encoder); 151 drm_mode_connector_attach_encoder(connector, encoder);
159 152
160 drm_object_attach_property(&connector->base, 153 drm_object_attach_property(&connector->base,
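
[ Ed. Two mechanical core-API updates run through the udl and vmwgfx connector code in this pull: drm_encoder_find() replaces the open-coded drm_mode_object_find() + obj_to_encoder() pair, and drm_sysfs_connector_add()/remove() become drm_connector_register()/unregister(). The resulting connector setup, sketched with stand-in my_* tables (assumed, not udl's real ones): ]

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

static const struct drm_connector_funcs my_connector_funcs;		/* assumed */
static const struct drm_connector_helper_funcs my_connector_helper_funcs; /* assumed */

static int my_connector_init(struct drm_device *dev,
			     struct drm_connector *connector,
			     struct drm_encoder *encoder)
{
	drm_connector_init(dev, connector, &my_connector_funcs,
			   DRM_MODE_CONNECTOR_DVII);
	drm_connector_helper_add(connector, &my_connector_helper_funcs);
	drm_connector_register(connector);	/* was drm_sysfs_connector_add() */
	return drm_mode_connector_attach_encoder(connector, encoder);
}
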
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 377176372da8..d1da339843ca 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -550,7 +550,7 @@ out:
550 return ret; 550 return ret;
551} 551}
552 552
553static struct drm_fb_helper_funcs udl_fb_helper_funcs = { 553static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
554 .fb_probe = udlfb_create, 554 .fb_probe = udlfb_create,
555}; 555};
556 556
@@ -583,7 +583,8 @@ int udl_fbdev_init(struct drm_device *dev)
583 return -ENOMEM; 583 return -ENOMEM;
584 584
585 udl->fbdev = ufbdev; 585 udl->fbdev = ufbdev;
586 ufbdev->helper.funcs = &udl_fb_helper_funcs; 586
587 drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
587 588
588 ret = drm_fb_helper_init(dev, &ufbdev->helper, 589 ret = drm_fb_helper_init(dev, &ufbdev->helper,
589 1, 1); 590 1, 1);
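
[ Ed. drm_fb_helper_prepare() is new core API in this series for seeding a drm_fb_helper with its funcs table before drm_fb_helper_init(); the udl conversion above is representative of the pattern: ]

	/* before: */
	ufbdev->helper.funcs = &udl_fb_helper_funcs;

	/* after: */
	drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
	ret = drm_fb_helper_init(dev, &ufbdev->helper, 1, 1);
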
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c041cd73f399..8044f5fb7c49 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
107 } 107 }
108} 108}
109 109
110static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) 110static int udl_gem_get_pages(struct udl_gem_object *obj)
111{ 111{
112 struct page **pages; 112 struct page **pages;
113 113
114 if (obj->pages) 114 if (obj->pages)
115 return 0; 115 return 0;
116 116
117 pages = drm_gem_get_pages(&obj->base, gfpmask); 117 pages = drm_gem_get_pages(&obj->base);
118 if (IS_ERR(pages)) 118 if (IS_ERR(pages))
119 return PTR_ERR(pages); 119 return PTR_ERR(pages);
120 120
@@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj)
147 return 0; 147 return 0;
148 } 148 }
149 149
150 ret = udl_gem_get_pages(obj, GFP_KERNEL); 150 ret = udl_gem_get_pages(obj);
151 if (ret) 151 if (ret)
152 return ret; 152 return ret;
153 153
@@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
205 } 205 }
206 gobj = to_udl_bo(obj); 206 gobj = to_udl_bo(obj);
207 207
208 ret = udl_gem_get_pages(gobj, GFP_KERNEL); 208 ret = udl_gem_get_pages(gobj);
209 if (ret) 209 if (ret)
210 goto out; 210 goto out;
211 ret = drm_gem_create_mmap_offset(obj); 211 ret = drm_gem_create_mmap_offset(obj);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 7094b92d1ec7..42795674bc07 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -306,10 +306,23 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
306 306
307 DRM_DEBUG("\n"); 307 DRM_DEBUG("\n");
308 ret = udl_modeset_init(dev); 308 ret = udl_modeset_init(dev);
309 if (ret)
310 goto err;
309 311
310 ret = udl_fbdev_init(dev); 312 ret = udl_fbdev_init(dev);
313 if (ret)
314 goto err;
315
316 ret = drm_vblank_init(dev, 1);
317 if (ret)
318 goto err_fb;
319
311 return 0; 320 return 0;
321err_fb:
322 udl_fbdev_cleanup(dev);
312err: 323err:
324 if (udl->urbs.count)
325 udl_free_urb_list(dev);
313 kfree(udl); 326 kfree(udl);
314 DRM_ERROR("%d\n", ret); 327 DRM_ERROR("%d\n", ret);
315 return ret; 328 return ret;
@@ -325,6 +338,8 @@ int udl_driver_unload(struct drm_device *dev)
325{ 338{
326 struct udl_device *udl = dev->dev_private; 339 struct udl_device *udl = dev->dev_private;
327 340
341 drm_vblank_cleanup(dev);
342
328 if (udl->urbs.count) 343 if (udl->urbs.count)
329 udl_free_urb_list(dev); 344 udl_free_urb_list(dev);
330 345
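
[ Ed. udl_driver_load() previously assigned but never checked either init return code; the fix checks each step and unwinds in reverse order, and the new drm_vblank_init(dev, 1) pairs with the synchronous page flip added below. The canonical shape, sketched with hypothetical my_* helpers (the udl-specific urb-list teardown is elided): ]

int my_driver_load(struct drm_device *dev)
{
	int ret;

	ret = my_modeset_init(dev);
	if (ret)
		goto err;

	ret = my_fbdev_init(dev);
	if (ret)
		goto err;

	ret = drm_vblank_init(dev, 1);	/* one CRTC's worth of vblank state */
	if (ret)
		goto err_fb;

	return 0;

err_fb:
	my_fbdev_cleanup(dev);		/* undo only what already succeeded */
err:
	return ret;
}
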
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index cddc4fcf35cf..dc145d320b25 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -363,6 +363,26 @@ static void udl_crtc_destroy(struct drm_crtc *crtc)
363 kfree(crtc); 363 kfree(crtc);
364} 364}
365 365
366static int udl_crtc_page_flip(struct drm_crtc *crtc,
367 struct drm_framebuffer *fb,
368 struct drm_pending_vblank_event *event,
369 uint32_t page_flip_flags)
370{
371 struct udl_framebuffer *ufb = to_udl_fb(fb);
372 struct drm_device *dev = crtc->dev;
373 unsigned long flags;
374
375 udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
376
377 spin_lock_irqsave(&dev->event_lock, flags);
378 if (event)
379 drm_send_vblank_event(dev, 0, event);
380 spin_unlock_irqrestore(&dev->event_lock, flags);
381 crtc->primary->fb = fb;
382
383 return 0;
384}
385
366static void udl_crtc_prepare(struct drm_crtc *crtc) 386static void udl_crtc_prepare(struct drm_crtc *crtc)
367{ 387{
368} 388}
@@ -384,6 +404,7 @@ static struct drm_crtc_helper_funcs udl_helper_funcs = {
384static const struct drm_crtc_funcs udl_crtc_funcs = { 404static const struct drm_crtc_funcs udl_crtc_funcs = {
385 .set_config = drm_crtc_helper_set_config, 405 .set_config = drm_crtc_helper_set_config,
386 .destroy = udl_crtc_destroy, 406 .destroy = udl_crtc_destroy,
407 .page_flip = udl_crtc_page_flip,
387}; 408};
388 409
389static int udl_crtc_init(struct drm_device *dev) 410static int udl_crtc_init(struct drm_device *dev)
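
[ Ed. udl is a USB device with no vblank interrupt to wait on, so the new .page_flip hook completes synchronously: it flushes full-frame damage inline and sends the completion event immediately under dev->event_lock. Userspace is unaffected; a flip requested via libdrm simply completes without waiting for a scanout boundary: ]

	/* userspace view (libdrm); ctx is whatever the caller wants echoed back */
	drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, &ctx);
	/* the flip-complete event arrives promptly rather than a frame later */
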
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 458cdf6d81e8..ce0ab951f507 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ 6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ 7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ 8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
9 vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o 9 vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
10 vmwgfx_cmdbuf_res.o \
10 11
11obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o 12obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
new file mode 100644
index 000000000000..bfeb4b1f2acf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -0,0 +1,341 @@
1/**************************************************************************
2 *
3 * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29
30#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
31
32enum vmw_cmdbuf_res_state {
33 VMW_CMDBUF_RES_COMMITED,
34 VMW_CMDBUF_RES_ADD,
35 VMW_CMDBUF_RES_DEL
36};
37
38/**
39 * struct vmw_cmdbuf_res - Command buffer managed resource entry.
40 *
41 * @res: Refcounted pointer to a struct vmw_resource.
42 * @hash: Hash entry for the manager hash table.
43 * @head: List head used either by the staging list or the manager list
 44 * of committed resources.
45 * @state: Staging state of this resource entry.
46 * @man: Pointer to a resource manager for this entry.
47 */
48struct vmw_cmdbuf_res {
49 struct vmw_resource *res;
50 struct drm_hash_item hash;
51 struct list_head head;
52 enum vmw_cmdbuf_res_state state;
53 struct vmw_cmdbuf_res_manager *man;
54};
55
56/**
57 * struct vmw_cmdbuf_res_manager - Command buffer resource manager.
58 *
 59 * @resources: Hash table containing staged and committed command buffer
60 * resources
 61 * @list: List of committed command buffer resources.
62 * @dev_priv: Pointer to a device private structure.
63 *
64 * @resources and @list are protected by the cmdbuf mutex for now.
65 */
66struct vmw_cmdbuf_res_manager {
67 struct drm_open_hash resources;
68 struct list_head list;
69 struct vmw_private *dev_priv;
70};
71
72
73/**
74 * vmw_cmdbuf_res_lookup - Look up a command buffer resource
75 *
76 * @man: Pointer to the command buffer resource manager
77 * @resource_type: The resource type, that combined with the user key
78 * identifies the resource.
79 * @user_key: The user key.
80 *
81 * Returns a valid refcounted struct vmw_resource pointer on success,
82 * an error pointer on failure.
83 */
84struct vmw_resource *
85vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
86 enum vmw_cmdbuf_res_type res_type,
87 u32 user_key)
88{
89 struct drm_hash_item *hash;
90 int ret;
91 unsigned long key = user_key | (res_type << 24);
92
93 ret = drm_ht_find_item(&man->resources, key, &hash);
94 if (unlikely(ret != 0))
95 return ERR_PTR(ret);
96
97 return vmw_resource_reference
98 (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
99}
100
101/**
102 * vmw_cmdbuf_res_free - Free a command buffer resource.
103 *
104 * @man: Pointer to the command buffer resource manager
105 * @entry: Pointer to a struct vmw_cmdbuf_res.
106 *
107 * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
108 * struct vmw_resource.
109 */
110static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
111 struct vmw_cmdbuf_res *entry)
112{
113 list_del(&entry->head);
114 WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
115 vmw_resource_unreference(&entry->res);
116 kfree(entry);
117}
118
119/**
120 * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
121 *
122 * @list: Caller's list of command buffer resource actions.
123 *
124 * This function commits a list of command buffer resource
125 * additions or removals.
126 * It is typically called when the execbuf ioctl call triggering these
 127 * actions has committed the fifo contents to the device.
128 */
129void vmw_cmdbuf_res_commit(struct list_head *list)
130{
131 struct vmw_cmdbuf_res *entry, *next;
132
133 list_for_each_entry_safe(entry, next, list, head) {
134 list_del(&entry->head);
135 switch (entry->state) {
136 case VMW_CMDBUF_RES_ADD:
137 entry->state = VMW_CMDBUF_RES_COMMITED;
138 list_add_tail(&entry->head, &entry->man->list);
139 break;
140 case VMW_CMDBUF_RES_DEL:
141 vmw_resource_unreference(&entry->res);
142 kfree(entry);
143 break;
144 default:
145 BUG();
146 break;
147 }
148 }
149}
150
151/**
152 * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
153 *
154 * @man: Pointer to the command buffer resource manager
155 * @list: Caller's list of command buffer resource action
156 *
157 * This function reverts a list of command buffer resource
158 * additions or removals.
159 * It is typically called when the execbuf ioctl call triggering these
160 * actions failed for some reason, and the command stream was never
161 * submitted.
162 */
163void vmw_cmdbuf_res_revert(struct list_head *list)
164{
165 struct vmw_cmdbuf_res *entry, *next;
166 int ret;
167
168 list_for_each_entry_safe(entry, next, list, head) {
169 switch (entry->state) {
170 case VMW_CMDBUF_RES_ADD:
171 vmw_cmdbuf_res_free(entry->man, entry);
172 break;
173 case VMW_CMDBUF_RES_DEL:
174 ret = drm_ht_insert_item(&entry->man->resources,
175 &entry->hash);
176 list_del(&entry->head);
177 list_add_tail(&entry->head, &entry->man->list);
178 entry->state = VMW_CMDBUF_RES_COMMITED;
179 break;
180 default:
181 BUG();
182 break;
183 }
184 }
185}
186
187/**
188 * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
189 *
190 * @man: Pointer to the command buffer resource manager.
191 * @res_type: The resource type.
192 * @user_key: The user-space id of the resource.
193 * @res: Valid (refcount != 0) pointer to a struct vmw_resource.
194 * @list: The staging list.
195 *
196 * This function allocates a struct vmw_cmdbuf_res entry and adds the
197 * resource to the hash table of the manager identified by @man. The
198 * entry is then put on the staging list identified by @list.
199 */
200int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
201 enum vmw_cmdbuf_res_type res_type,
202 u32 user_key,
203 struct vmw_resource *res,
204 struct list_head *list)
205{
206 struct vmw_cmdbuf_res *cres;
207 int ret;
208
209 cres = kzalloc(sizeof(*cres), GFP_KERNEL);
210 if (unlikely(cres == NULL))
211 return -ENOMEM;
212
213 cres->hash.key = user_key | (res_type << 24);
214 ret = drm_ht_insert_item(&man->resources, &cres->hash);
215 if (unlikely(ret != 0))
216 goto out_invalid_key;
217
218 cres->state = VMW_CMDBUF_RES_ADD;
219 cres->res = vmw_resource_reference(res);
220 cres->man = man;
221 list_add_tail(&cres->head, list);
222
223out_invalid_key:
224 return ret;
225}
226
227/**
228 * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
229 *
230 * @man: Pointer to the command buffer resource manager.
231 * @res_type: The resource type.
232 * @user_key: The user-space id of the resource.
233 * @list: The staging list.
234 *
235 * This function looks up the struct vmw_cmdbuf_res entry from the manager
236 * hash table and, if it exists, removes it. Depending on its current staging
237 * state it then either removes the entry from the staging list or adds it
238 * to it with a staging state of removal.
239 */
240int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
241 enum vmw_cmdbuf_res_type res_type,
242 u32 user_key,
243 struct list_head *list)
244{
245 struct vmw_cmdbuf_res *entry;
246 struct drm_hash_item *hash;
247 int ret;
248
249 ret = drm_ht_find_item(&man->resources, user_key, &hash);
250 if (likely(ret != 0))
251 return -EINVAL;
252
253 entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
254
255 switch (entry->state) {
256 case VMW_CMDBUF_RES_ADD:
257 vmw_cmdbuf_res_free(man, entry);
258 break;
259 case VMW_CMDBUF_RES_COMMITED:
260 (void) drm_ht_remove_item(&man->resources, &entry->hash);
261 list_del(&entry->head);
262 entry->state = VMW_CMDBUF_RES_DEL;
263 list_add_tail(&entry->head, list);
264 break;
265 default:
266 BUG();
267 break;
268 }
269
270 return 0;
271}
272
273/**
274 * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
275 * manager.
276 *
277 * @dev_priv: Pointer to a struct vmw_private
278 *
279 * Allocates and initializes a command buffer managed resource manager. Returns
280 * an error pointer on failure.
281 */
282struct vmw_cmdbuf_res_manager *
283vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
284{
285 struct vmw_cmdbuf_res_manager *man;
286 int ret;
287
288 man = kzalloc(sizeof(*man), GFP_KERNEL);
289 if (man == NULL)
290 return ERR_PTR(-ENOMEM);
291
292 man->dev_priv = dev_priv;
293 INIT_LIST_HEAD(&man->list);
294 ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
295 if (ret == 0)
296 return man;
297
298 kfree(man);
299 return ERR_PTR(ret);
300}
301
302/**
303 * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
304 * manager.
305 *
306 * @man: Pointer to the manager to destroy.
307 *
308 * This function destroys a command buffer managed resource manager and
309 * unreferences / frees all command buffer managed resources and -entries
310 * associated with it.
311 */
312void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
313{
314 struct vmw_cmdbuf_res *entry, *next;
315
316 list_for_each_entry_safe(entry, next, &man->list, head)
317 vmw_cmdbuf_res_free(man, entry);
318
319 kfree(man);
320}
321
322/**
323 *
324 * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
325 * resource manager
326 *
327 * Returns the approximate allocation size of a command buffer managed
328 * resource manager.
329 */
330size_t vmw_cmdbuf_res_man_size(void)
331{
332 static size_t res_man_size;
333
334 if (unlikely(res_man_size == 0))
335 res_man_size =
336 ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
337 ttm_round_pot(sizeof(struct hlist_head) <<
338 VMW_CMDBUF_RES_MAN_HT_ORDER);
339
340 return res_man_size;
341}
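
[ Ed. The new file above generalizes the old per-file compat shader manager: resources keyed by (res_type << 24 | user_key) are staged on a caller-owned list, then either committed or reverted as a unit. Intended calling sequence, pieced together from the kerneldoc (error handling elided; dev_priv, user_key and res come from the surrounding driver): ]

	struct vmw_cmdbuf_res_manager *man;
	LIST_HEAD(staged);

	man = vmw_cmdbuf_res_man_create(dev_priv);	/* ERR_PTR on failure */

	/* while building a command stream: */
	vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
			   user_key, res, &staged);

	/* once the fifo contents reached the device: */
	vmw_cmdbuf_res_commit(&staged);

	/* ...or, had submission failed before that point: */
	vmw_cmdbuf_res_revert(&staged);

	vmw_cmdbuf_res_man_destroy(man);	/* drops anything committed */
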
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 8bb26dcd9eae..5ac92874404d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -33,6 +33,7 @@ struct vmw_user_context {
33 struct ttm_base_object base; 33 struct ttm_base_object base;
34 struct vmw_resource res; 34 struct vmw_resource res;
35 struct vmw_ctx_binding_state cbs; 35 struct vmw_ctx_binding_state cbs;
36 struct vmw_cmdbuf_res_manager *man;
36}; 37};
37 38
38 39
@@ -103,7 +104,8 @@ static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
103 104
104static void vmw_hw_context_destroy(struct vmw_resource *res) 105static void vmw_hw_context_destroy(struct vmw_resource *res)
105{ 106{
106 107 struct vmw_user_context *uctx =
108 container_of(res, struct vmw_user_context, res);
107 struct vmw_private *dev_priv = res->dev_priv; 109 struct vmw_private *dev_priv = res->dev_priv;
108 struct { 110 struct {
109 SVGA3dCmdHeader header; 111 SVGA3dCmdHeader header;
@@ -113,9 +115,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
113 115
114 if (res->func->destroy == vmw_gb_context_destroy) { 116 if (res->func->destroy == vmw_gb_context_destroy) {
115 mutex_lock(&dev_priv->cmdbuf_mutex); 117 mutex_lock(&dev_priv->cmdbuf_mutex);
118 vmw_cmdbuf_res_man_destroy(uctx->man);
116 mutex_lock(&dev_priv->binding_mutex); 119 mutex_lock(&dev_priv->binding_mutex);
117 (void) vmw_context_binding_state_kill 120 (void) vmw_context_binding_state_kill(&uctx->cbs);
118 (&container_of(res, struct vmw_user_context, res)->cbs);
119 (void) vmw_gb_context_destroy(res); 121 (void) vmw_gb_context_destroy(res);
120 mutex_unlock(&dev_priv->binding_mutex); 122 mutex_unlock(&dev_priv->binding_mutex);
121 if (dev_priv->pinned_bo != NULL && 123 if (dev_priv->pinned_bo != NULL &&
@@ -152,13 +154,16 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
152 ret = vmw_resource_init(dev_priv, res, true, 154 ret = vmw_resource_init(dev_priv, res, true,
153 res_free, &vmw_gb_context_func); 155 res_free, &vmw_gb_context_func);
154 res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; 156 res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
157 if (unlikely(ret != 0))
158 goto out_err;
155 159
156 if (unlikely(ret != 0)) { 160 if (dev_priv->has_mob) {
157 if (res_free) 161 uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
158 res_free(res); 162 if (unlikely(IS_ERR(uctx->man))) {
159 else 163 ret = PTR_ERR(uctx->man);
160 kfree(res); 164 uctx->man = NULL;
161 return ret; 165 goto out_err;
166 }
162 } 167 }
163 168
164 memset(&uctx->cbs, 0, sizeof(uctx->cbs)); 169 memset(&uctx->cbs, 0, sizeof(uctx->cbs));
@@ -166,6 +171,13 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
166 171
167 vmw_resource_activate(res, vmw_hw_context_destroy); 172 vmw_resource_activate(res, vmw_hw_context_destroy);
168 return 0; 173 return 0;
174
175out_err:
176 if (res_free)
177 res_free(res);
178 else
179 kfree(res);
180 return ret;
169} 181}
170 182
171static int vmw_context_init(struct vmw_private *dev_priv, 183static int vmw_context_init(struct vmw_private *dev_priv,
@@ -471,7 +483,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
471 */ 483 */
472 484
473 if (unlikely(vmw_user_context_size == 0)) 485 if (unlikely(vmw_user_context_size == 0))
474 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; 486 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
487 ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
475 488
476 ret = ttm_read_lock(&dev_priv->reservation_sem, true); 489 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
477 if (unlikely(ret != 0)) 490 if (unlikely(ret != 0))
@@ -901,3 +914,8 @@ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
901{ 914{
902 return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); 915 return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
903} 916}
917
918struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
919{
920 return container_of(ctx, struct vmw_user_context, res)->man;
921}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 70ddce8358b0..ed1d51006ab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -61,7 +61,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
61 61
62 vmw_execbuf_release_pinned_bo(dev_priv); 62 vmw_execbuf_release_pinned_bo(dev_priv);
63 63
64 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 64 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
65 if (unlikely(ret != 0)) 65 if (unlikely(ret != 0))
66 goto err; 66 goto err;
67 67
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
105 if (pin) 105 if (pin)
106 vmw_execbuf_release_pinned_bo(dev_priv); 106 vmw_execbuf_release_pinned_bo(dev_priv);
107 107
108 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 108 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
109 if (unlikely(ret != 0)) 109 if (unlikely(ret != 0))
110 goto err; 110 goto err;
111 111
@@ -212,7 +212,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
212 212
213 if (pin) 213 if (pin)
214 vmw_execbuf_release_pinned_bo(dev_priv); 214 vmw_execbuf_release_pinned_bo(dev_priv);
215 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 215 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
216 if (unlikely(ret != 0)) 216 if (unlikely(ret != 0))
217 goto err_unlock; 217 goto err_unlock;
218 218
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 246a62bab378..18b54acacfbb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -316,7 +316,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
316 if (unlikely(ret != 0)) 316 if (unlikely(ret != 0))
317 return ret; 317 return ret;
318 318
319 ret = ttm_bo_reserve(bo, false, true, false, 0); 319 ret = ttm_bo_reserve(bo, false, true, false, NULL);
320 BUG_ON(ret != 0); 320 BUG_ON(ret != 0);
321 321
322 ret = ttm_bo_kmap(bo, 0, 1, &map); 322 ret = ttm_bo_kmap(bo, 0, 1, &map);
@@ -946,7 +946,6 @@ static void vmw_postclose(struct drm_device *dev,
946 drm_master_put(&vmw_fp->locked_master); 946 drm_master_put(&vmw_fp->locked_master);
947 } 947 }
948 948
949 vmw_compat_shader_man_destroy(vmw_fp->shman);
950 ttm_object_file_release(&vmw_fp->tfile); 949 ttm_object_file_release(&vmw_fp->tfile);
951 kfree(vmw_fp); 950 kfree(vmw_fp);
952} 951}
@@ -966,16 +965,10 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
966 if (unlikely(vmw_fp->tfile == NULL)) 965 if (unlikely(vmw_fp->tfile == NULL))
967 goto out_no_tfile; 966 goto out_no_tfile;
968 967
969 vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
970 if (IS_ERR(vmw_fp->shman))
971 goto out_no_shman;
972
973 file_priv->driver_priv = vmw_fp; 968 file_priv->driver_priv = vmw_fp;
974 969
975 return 0; 970 return 0;
976 971
977out_no_shman:
978 ttm_object_file_release(&vmw_fp->tfile);
979out_no_tfile: 972out_no_tfile:
980 kfree(vmw_fp); 973 kfree(vmw_fp);
981 return ret; 974 return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index c886c024c637..99f731757c4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,10 @@
40#include <drm/ttm/ttm_module.h> 40#include <drm/ttm/ttm_module.h>
41#include "vmwgfx_fence.h" 41#include "vmwgfx_fence.h"
42 42
43#define VMWGFX_DRIVER_DATE "20140325" 43#define VMWGFX_DRIVER_DATE "20140704"
44#define VMWGFX_DRIVER_MAJOR 2 44#define VMWGFX_DRIVER_MAJOR 2
45#define VMWGFX_DRIVER_MINOR 6 45#define VMWGFX_DRIVER_MINOR 6
46#define VMWGFX_DRIVER_PATCHLEVEL 0 46#define VMWGFX_DRIVER_PATCHLEVEL 1
47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
49#define VMWGFX_MAX_RELOCATIONS 2048 49#define VMWGFX_MAX_RELOCATIONS 2048
@@ -75,14 +75,11 @@
75#define VMW_RES_FENCE ttm_driver_type3 75#define VMW_RES_FENCE ttm_driver_type3
76#define VMW_RES_SHADER ttm_driver_type4 76#define VMW_RES_SHADER ttm_driver_type4
77 77
78struct vmw_compat_shader_manager;
79
80struct vmw_fpriv { 78struct vmw_fpriv {
81 struct drm_master *locked_master; 79 struct drm_master *locked_master;
82 struct ttm_object_file *tfile; 80 struct ttm_object_file *tfile;
83 struct list_head fence_events; 81 struct list_head fence_events;
84 bool gb_aware; 82 bool gb_aware;
85 struct vmw_compat_shader_manager *shman;
86}; 83};
87 84
88struct vmw_dma_buffer { 85struct vmw_dma_buffer {
@@ -124,6 +121,10 @@ struct vmw_resource {
124 void (*hw_destroy) (struct vmw_resource *res); 121 void (*hw_destroy) (struct vmw_resource *res);
125}; 122};
126 123
124
125/*
126 * Resources that are managed using ioctls.
127 */
127enum vmw_res_type { 128enum vmw_res_type {
128 vmw_res_context, 129 vmw_res_context,
129 vmw_res_surface, 130 vmw_res_surface,
@@ -132,6 +133,15 @@ enum vmw_res_type {
132 vmw_res_max 133 vmw_res_max
133}; 134};
134 135
136/*
137 * Resources that are managed using command streams.
138 */
139enum vmw_cmdbuf_res_type {
140 vmw_cmdbuf_res_compat_shader
141};
142
143struct vmw_cmdbuf_res_manager;
144
135struct vmw_cursor_snooper { 145struct vmw_cursor_snooper {
136 struct drm_crtc *crtc; 146 struct drm_crtc *crtc;
137 size_t age; 147 size_t age;
@@ -341,7 +351,7 @@ struct vmw_sw_context{
341 bool needs_post_query_barrier; 351 bool needs_post_query_barrier;
342 struct vmw_resource *error_resource; 352 struct vmw_resource *error_resource;
343 struct vmw_ctx_binding_state staged_bindings; 353 struct vmw_ctx_binding_state staged_bindings;
344 struct list_head staged_shaders; 354 struct list_head staged_cmd_res;
345}; 355};
346 356
347struct vmw_legacy_display; 357struct vmw_legacy_display;
@@ -974,7 +984,8 @@ extern void vmw_context_binding_res_list_kill(struct list_head *head);
974extern void vmw_context_binding_res_list_scrub(struct list_head *head); 984extern void vmw_context_binding_res_list_scrub(struct list_head *head);
975extern int vmw_context_rebind_all(struct vmw_resource *ctx); 985extern int vmw_context_rebind_all(struct vmw_resource *ctx);
976extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); 986extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
977 987extern struct vmw_cmdbuf_res_manager *
988vmw_context_res_man(struct vmw_resource *ctx);
978/* 989/*
979 * Surface management - vmwgfx_surface.c 990 * Surface management - vmwgfx_surface.c
980 */ 991 */
@@ -1008,27 +1019,42 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
1008 struct drm_file *file_priv); 1019 struct drm_file *file_priv);
1009extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, 1020extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
1010 struct drm_file *file_priv); 1021 struct drm_file *file_priv);
1011extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, 1022extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
1012 SVGA3dShaderType shader_type, 1023 struct vmw_cmdbuf_res_manager *man,
1013 u32 *user_key);
1014extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
1015 struct list_head *list);
1016extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
1017 struct list_head *list);
1018extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
1019 u32 user_key,
1020 SVGA3dShaderType shader_type,
1021 struct list_head *list);
1022extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
1023 u32 user_key, const void *bytecode, 1024 u32 user_key, const void *bytecode,
1024 SVGA3dShaderType shader_type, 1025 SVGA3dShaderType shader_type,
1025 size_t size, 1026 size_t size,
1026 struct ttm_object_file *tfile,
1027 struct list_head *list); 1027 struct list_head *list);
1028extern struct vmw_compat_shader_manager * 1028extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
1029vmw_compat_shader_man_create(struct vmw_private *dev_priv); 1029 u32 user_key, SVGA3dShaderType shader_type,
1030extern void 1030 struct list_head *list);
1031vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man); 1031extern struct vmw_resource *
1032vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
1033 u32 user_key, SVGA3dShaderType shader_type);
1034
1035/*
1036 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
1037 */
1038
1039extern struct vmw_cmdbuf_res_manager *
1040vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
1041extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
1042extern size_t vmw_cmdbuf_res_man_size(void);
1043extern struct vmw_resource *
1044vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
1045 enum vmw_cmdbuf_res_type res_type,
1046 u32 user_key);
1047extern void vmw_cmdbuf_res_revert(struct list_head *list);
1048extern void vmw_cmdbuf_res_commit(struct list_head *list);
1049extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
1050 enum vmw_cmdbuf_res_type res_type,
1051 u32 user_key,
1052 struct vmw_resource *res,
1053 struct list_head *list);
1054extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
1055 enum vmw_cmdbuf_res_type res_type,
1056 u32 user_key,
1057 struct list_head *list);
1032 1058
1033 1059
1034/** 1060/**
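
[ Ed. Net effect of the header churn: the compat shader helpers move from the per-file vmw_compat_shader_manager to the per-context vmw_cmdbuf_res_manager, and vmw_compat_shader_lookup() now returns a refcounted resource (or an ERR_PTR) instead of rewriting the caller's handle in place. New-style lookup, matching its use in vmw_cmd_set_shader() below: ]

	struct vmw_resource *res;

	res = vmw_compat_shader_lookup(vmw_context_res_man(ctx),
				       user_key, shader_type);
	if (!IS_ERR(res)) {
		/* ... use res ... */
		vmw_resource_unreference(&res);
	}
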
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 87df0b3674fd..7bfdaa163a33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -422,28 +422,91 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
422 return 0; 422 return 0;
423} 423}
424 424
425
426/**
427 * vmw_cmd_res_reloc_add - Add a resource to a software context's
428 * relocation- and validation lists.
429 *
430 * @dev_priv: Pointer to a struct vmw_private identifying the device.
431 * @sw_context: Pointer to the software context.
432 * @res_type: Resource type.
433 * @id_loc: Pointer to where the id that needs translation is located.
434 * @res: Valid pointer to a struct vmw_resource.
435 * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
436 * used for this resource is returned here.
437 */
438static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
439 struct vmw_sw_context *sw_context,
440 enum vmw_res_type res_type,
441 uint32_t *id_loc,
442 struct vmw_resource *res,
443 struct vmw_resource_val_node **p_val)
444{
445 int ret;
446 struct vmw_resource_val_node *node;
447
448 *p_val = NULL;
449 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
450 res,
451 id_loc - sw_context->buf_start);
452 if (unlikely(ret != 0))
453 goto out_err;
454
455 ret = vmw_resource_val_add(sw_context, res, &node);
456 if (unlikely(ret != 0))
457 goto out_err;
458
459 if (res_type == vmw_res_context && dev_priv->has_mob &&
460 node->first_usage) {
461
462 /*
463 * Put contexts first on the list to be able to exit
464 * list traversal for contexts early.
465 */
466 list_del(&node->head);
467 list_add(&node->head, &sw_context->resource_list);
468
469 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
470 if (unlikely(ret != 0))
471 goto out_err;
472 node->staged_bindings =
473 kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
474 if (node->staged_bindings == NULL) {
475 DRM_ERROR("Failed to allocate context binding "
476 "information.\n");
477 goto out_err;
478 }
479 INIT_LIST_HEAD(&node->staged_bindings->list);
480 }
481
482 if (p_val)
483 *p_val = node;
484
485out_err:
486 return ret;
487}
488
489
425/** 490/**
426 * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it 491 * vmw_cmd_res_check - Check that a resource is present and if so, put it
427 * on the resource validate list unless it's already there. 492 * on the resource validate list unless it's already there.
428 * 493 *
429 * @dev_priv: Pointer to a device private structure. 494 * @dev_priv: Pointer to a device private structure.
430 * @sw_context: Pointer to the software context. 495 * @sw_context: Pointer to the software context.
431 * @res_type: Resource type. 496 * @res_type: Resource type.
 432 * @converter: User-space visible type specific information. 497 * @converter: User-space visible type specific information.
433 * @id: user-space resource id handle.
434 * @id_loc: Pointer to the location in the command buffer currently being 498 * @id_loc: Pointer to the location in the command buffer currently being
435 * parsed from where the user-space resource id handle is located. 499 * parsed from where the user-space resource id handle is located.
 436 * @p_val: Pointer to pointer to resource validation node. Populated 500 * @p_val: Pointer to pointer to resource validation node. Populated
437 * on exit. 501 * on exit.
438 */ 502 */
439static int 503static int
440vmw_cmd_compat_res_check(struct vmw_private *dev_priv, 504vmw_cmd_res_check(struct vmw_private *dev_priv,
441 struct vmw_sw_context *sw_context, 505 struct vmw_sw_context *sw_context,
442 enum vmw_res_type res_type, 506 enum vmw_res_type res_type,
443 const struct vmw_user_resource_conv *converter, 507 const struct vmw_user_resource_conv *converter,
444 uint32_t id, 508 uint32_t *id_loc,
445 uint32_t *id_loc, 509 struct vmw_resource_val_node **p_val)
446 struct vmw_resource_val_node **p_val)
447{ 510{
448 struct vmw_res_cache_entry *rcache = 511 struct vmw_res_cache_entry *rcache =
449 &sw_context->res_cache[res_type]; 512 &sw_context->res_cache[res_type];
@@ -451,7 +514,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
451 struct vmw_resource_val_node *node; 514 struct vmw_resource_val_node *node;
452 int ret; 515 int ret;
453 516
454 if (id == SVGA3D_INVALID_ID) { 517 if (*id_loc == SVGA3D_INVALID_ID) {
455 if (p_val) 518 if (p_val)
456 *p_val = NULL; 519 *p_val = NULL;
457 if (res_type == vmw_res_context) { 520 if (res_type == vmw_res_context) {
@@ -466,7 +529,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
466 * resource 529 * resource
467 */ 530 */
468 531
469 if (likely(rcache->valid && id == rcache->handle)) { 532 if (likely(rcache->valid && *id_loc == rcache->handle)) {
470 const struct vmw_resource *res = rcache->res; 533 const struct vmw_resource *res = rcache->res;
471 534
472 rcache->node->first_usage = false; 535 rcache->node->first_usage = false;
@@ -480,49 +543,28 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
480 543
481 ret = vmw_user_resource_lookup_handle(dev_priv, 544 ret = vmw_user_resource_lookup_handle(dev_priv,
482 sw_context->fp->tfile, 545 sw_context->fp->tfile,
483 id, 546 *id_loc,
484 converter, 547 converter,
485 &res); 548 &res);
486 if (unlikely(ret != 0)) { 549 if (unlikely(ret != 0)) {
487 DRM_ERROR("Could not find or use resource 0x%08x.\n", 550 DRM_ERROR("Could not find or use resource 0x%08x.\n",
488 (unsigned) id); 551 (unsigned) *id_loc);
489 dump_stack(); 552 dump_stack();
490 return ret; 553 return ret;
491 } 554 }
492 555
493 rcache->valid = true; 556 rcache->valid = true;
494 rcache->res = res; 557 rcache->res = res;
495 rcache->handle = id; 558 rcache->handle = *id_loc;
496
497 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
498 res,
499 id_loc - sw_context->buf_start);
500 if (unlikely(ret != 0))
501 goto out_no_reloc;
502 559
503 ret = vmw_resource_val_add(sw_context, res, &node); 560 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
561 res, &node);
504 if (unlikely(ret != 0)) 562 if (unlikely(ret != 0))
505 goto out_no_reloc; 563 goto out_no_reloc;
506 564
507 rcache->node = node; 565 rcache->node = node;
508 if (p_val) 566 if (p_val)
509 *p_val = node; 567 *p_val = node;
510
511 if (dev_priv->has_mob && node->first_usage &&
512 res_type == vmw_res_context) {
513 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
514 if (unlikely(ret != 0))
515 goto out_no_reloc;
516 node->staged_bindings =
517 kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
518 if (node->staged_bindings == NULL) {
519 DRM_ERROR("Failed to allocate context binding "
520 "information.\n");
521 goto out_no_reloc;
522 }
523 INIT_LIST_HEAD(&node->staged_bindings->list);
524 }
525
526 vmw_resource_unreference(&res); 568 vmw_resource_unreference(&res);
527 return 0; 569 return 0;
528 570
@@ -534,31 +576,6 @@ out_no_reloc:
534} 576}
535 577
536/** 578/**
537 * vmw_cmd_res_check - Check that a resource is present and if so, put it
538 * on the resource validate list unless it's already there.
539 *
540 * @dev_priv: Pointer to a device private structure.
541 * @sw_context: Pointer to the software context.
542 * @res_type: Resource type.
543 * @converter: User-space visisble type specific information.
544 * @id_loc: Pointer to the location in the command buffer currently being
545 * parsed from where the user-space resource id handle is located.
546 * @p_val: Pointer to pointer to resource validalidation node. Populated
547 * on exit.
548 */
549static int
550vmw_cmd_res_check(struct vmw_private *dev_priv,
551 struct vmw_sw_context *sw_context,
552 enum vmw_res_type res_type,
553 const struct vmw_user_resource_conv *converter,
554 uint32_t *id_loc,
555 struct vmw_resource_val_node **p_val)
556{
557 return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
558 converter, *id_loc, id_loc, p_val);
559}
560
561/**
562 * vmw_rebind_contexts - Rebind all resources previously bound to 579 * vmw_rebind_contexts - Rebind all resources previously bound to
563 * referenced contexts. 580 * referenced contexts.
564 * 581 *
@@ -572,8 +589,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
572 int ret; 589 int ret;
573 590
574 list_for_each_entry(val, &sw_context->resource_list, head) { 591 list_for_each_entry(val, &sw_context->resource_list, head) {
575 if (likely(!val->staged_bindings)) 592 if (unlikely(!val->staged_bindings))
576 continue; 593 break;
577 594
578 ret = vmw_context_rebind_all(val->res); 595 ret = vmw_context_rebind_all(val->res);
579 if (unlikely(ret != 0)) { 596 if (unlikely(ret != 0)) {
@@ -1626,13 +1643,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1626 } *cmd; 1643 } *cmd;
1627 int ret; 1644 int ret;
1628 size_t size; 1645 size_t size;
1646 struct vmw_resource_val_node *val;
1629 1647
1630 cmd = container_of(header, struct vmw_shader_define_cmd, 1648 cmd = container_of(header, struct vmw_shader_define_cmd,
1631 header); 1649 header);
1632 1650
1633 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1651 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1634 user_context_converter, &cmd->body.cid, 1652 user_context_converter, &cmd->body.cid,
1635 NULL); 1653 &val);
1636 if (unlikely(ret != 0)) 1654 if (unlikely(ret != 0))
1637 return ret; 1655 return ret;
1638 1656
@@ -1640,11 +1658,11 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1640 return 0; 1658 return 0;
1641 1659
1642 size = cmd->header.size - sizeof(cmd->body); 1660 size = cmd->header.size - sizeof(cmd->body);
1643 ret = vmw_compat_shader_add(sw_context->fp->shman, 1661 ret = vmw_compat_shader_add(dev_priv,
1662 vmw_context_res_man(val->res),
1644 cmd->body.shid, cmd + 1, 1663 cmd->body.shid, cmd + 1,
1645 cmd->body.type, size, 1664 cmd->body.type, size,
1646 sw_context->fp->tfile, 1665 &sw_context->staged_cmd_res);
1647 &sw_context->staged_shaders);
1648 if (unlikely(ret != 0)) 1666 if (unlikely(ret != 0))
1649 return ret; 1667 return ret;
1650 1668
@@ -1672,23 +1690,24 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1672 SVGA3dCmdDestroyShader body; 1690 SVGA3dCmdDestroyShader body;
1673 } *cmd; 1691 } *cmd;
1674 int ret; 1692 int ret;
1693 struct vmw_resource_val_node *val;
1675 1694
1676 cmd = container_of(header, struct vmw_shader_destroy_cmd, 1695 cmd = container_of(header, struct vmw_shader_destroy_cmd,
1677 header); 1696 header);
1678 1697
1679 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 1698 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1680 user_context_converter, &cmd->body.cid, 1699 user_context_converter, &cmd->body.cid,
1681 NULL); 1700 &val);
1682 if (unlikely(ret != 0)) 1701 if (unlikely(ret != 0))
1683 return ret; 1702 return ret;
1684 1703
1685 if (unlikely(!dev_priv->has_mob)) 1704 if (unlikely(!dev_priv->has_mob))
1686 return 0; 1705 return 0;
1687 1706
1688 ret = vmw_compat_shader_remove(sw_context->fp->shman, 1707 ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
1689 cmd->body.shid, 1708 cmd->body.shid,
1690 cmd->body.type, 1709 cmd->body.type,
1691 &sw_context->staged_shaders); 1710 &sw_context->staged_cmd_res);
1692 if (unlikely(ret != 0)) 1711 if (unlikely(ret != 0))
1693 return ret; 1712 return ret;
1694 1713
@@ -1715,7 +1734,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1715 SVGA3dCmdHeader header; 1734 SVGA3dCmdHeader header;
1716 SVGA3dCmdSetShader body; 1735 SVGA3dCmdSetShader body;
1717 } *cmd; 1736 } *cmd;
1718 struct vmw_resource_val_node *ctx_node; 1737 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1738 struct vmw_ctx_bindinfo bi;
1739 struct vmw_resource *res = NULL;
1719 int ret; 1740 int ret;
1720 1741
1721 cmd = container_of(header, struct vmw_set_shader_cmd, 1742 cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -1727,32 +1748,40 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1727 if (unlikely(ret != 0)) 1748 if (unlikely(ret != 0))
1728 return ret; 1749 return ret;
1729 1750
1730 if (dev_priv->has_mob) { 1751 if (!dev_priv->has_mob)
1731 struct vmw_ctx_bindinfo bi; 1752 return 0;
1732 struct vmw_resource_val_node *res_node; 1753
1733 u32 shid = cmd->body.shid; 1754 if (cmd->body.shid != SVGA3D_INVALID_ID) {
1734 1755 res = vmw_compat_shader_lookup
1735 if (shid != SVGA3D_INVALID_ID) 1756 (vmw_context_res_man(ctx_node->res),
1736 (void) vmw_compat_shader_lookup(sw_context->fp->shman, 1757 cmd->body.shid,
1737 cmd->body.type, 1758 cmd->body.type);
1738 &shid); 1759
1739 1760 if (!IS_ERR(res)) {
1740 ret = vmw_cmd_compat_res_check(dev_priv, sw_context, 1761 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1741 vmw_res_shader, 1762 vmw_res_shader,
1742 user_shader_converter, 1763 &cmd->body.shid, res,
1743 shid, 1764 &res_node);
1744 &cmd->body.shid, &res_node); 1765 vmw_resource_unreference(&res);
1766 if (unlikely(ret != 0))
1767 return ret;
1768 }
1769 }
1770
1771 if (!res_node) {
1772 ret = vmw_cmd_res_check(dev_priv, sw_context,
1773 vmw_res_shader,
1774 user_shader_converter,
1775 &cmd->body.shid, &res_node);
1745 if (unlikely(ret != 0)) 1776 if (unlikely(ret != 0))
1746 return ret; 1777 return ret;
1747
1748 bi.ctx = ctx_node->res;
1749 bi.res = res_node ? res_node->res : NULL;
1750 bi.bt = vmw_ctx_binding_shader;
1751 bi.i1.shader_type = cmd->body.type;
1752 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1753 } 1778 }
1754 1779
1755 return 0; 1780 bi.ctx = ctx_node->res;
1781 bi.res = res_node ? res_node->res : NULL;
1782 bi.bt = vmw_ctx_binding_shader;
1783 bi.i1.shader_type = cmd->body.type;
1784 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1756} 1785}
1757 1786
1758/** 1787/**
@@ -2394,6 +2423,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2394 } 2423 }
2395} 2424}
2396 2425
2426
2427
2397int vmw_execbuf_process(struct drm_file *file_priv, 2428int vmw_execbuf_process(struct drm_file *file_priv,
2398 struct vmw_private *dev_priv, 2429 struct vmw_private *dev_priv,
2399 void __user *user_commands, 2430 void __user *user_commands,
@@ -2453,7 +2484,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2453 goto out_unlock; 2484 goto out_unlock;
2454 sw_context->res_ht_initialized = true; 2485 sw_context->res_ht_initialized = true;
2455 } 2486 }
2456 INIT_LIST_HEAD(&sw_context->staged_shaders); 2487 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2457 2488
2458 INIT_LIST_HEAD(&resource_list); 2489 INIT_LIST_HEAD(&resource_list);
2459 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 2490 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
@@ -2548,8 +2579,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2548 } 2579 }
2549 2580
2550 list_splice_init(&sw_context->resource_list, &resource_list); 2581 list_splice_init(&sw_context->resource_list, &resource_list);
2551 vmw_compat_shaders_commit(sw_context->fp->shman, 2582 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
2552 &sw_context->staged_shaders);
2553 mutex_unlock(&dev_priv->cmdbuf_mutex); 2583 mutex_unlock(&dev_priv->cmdbuf_mutex);
2554 2584
2555 /* 2585 /*
@@ -2576,8 +2606,7 @@ out_unlock:
2576 list_splice_init(&sw_context->resource_list, &resource_list); 2606 list_splice_init(&sw_context->resource_list, &resource_list);
2577 error_resource = sw_context->error_resource; 2607 error_resource = sw_context->error_resource;
2578 sw_context->error_resource = NULL; 2608 sw_context->error_resource = NULL;
2579 vmw_compat_shaders_revert(sw_context->fp->shman, 2609 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
2580 &sw_context->staged_shaders);
2581 mutex_unlock(&dev_priv->cmdbuf_mutex); 2610 mutex_unlock(&dev_priv->cmdbuf_mutex);
2582 2611
2583 /* 2612 /*
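The hunks above swap the shader-specific staging list for the generic command-buffer managed resource list: actions are queued on sw_context->staged_cmd_res during validation, committed once the fifo submission has gone through, and reverted if the ioctl fails earlier. A hedged sketch of that lifecycle, using only the helpers visible in this diff (the error label and surrounding scaffolding are illustrative):

        INIT_LIST_HEAD(&sw_context->staged_cmd_res);

        /* Validation may stage additions or removals on the list, e.g.: */
        ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
                                 key, res, &sw_context->staged_cmd_res);
        if (unlikely(ret != 0))
                goto out_unlock;

        /* ... validate and submit the command stream ... */

        vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);     /* success */
        return 0;

out_unlock:
        vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);     /* failure */
        return ret;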
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b1273e8e9a69..26f8bdde3529 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -47,6 +47,7 @@ struct vmwgfx_gmrid_man {
47static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, 47static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
48 struct ttm_buffer_object *bo, 48 struct ttm_buffer_object *bo,
49 struct ttm_placement *placement, 49 struct ttm_placement *placement,
50 uint32_t flags,
50 struct ttm_mem_reg *mem) 51 struct ttm_mem_reg *mem)
51{ 52{
52 struct vmwgfx_gmrid_man *gman = 53 struct vmwgfx_gmrid_man *gman =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8f3edc4710f2..d2bc2b03d4c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,7 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
75 vmw_surface_unreference(&du->cursor_surface); 75 vmw_surface_unreference(&du->cursor_surface);
76 if (du->cursor_dmabuf) 76 if (du->cursor_dmabuf)
77 vmw_dmabuf_unreference(&du->cursor_dmabuf); 77 vmw_dmabuf_unreference(&du->cursor_dmabuf);
78 drm_sysfs_connector_remove(&du->connector); 78 drm_connector_unregister(&du->connector);
79 drm_crtc_cleanup(&du->crtc); 79 drm_crtc_cleanup(&du->crtc);
80 drm_encoder_cleanup(&du->encoder); 80 drm_encoder_cleanup(&du->encoder);
81 drm_connector_cleanup(&du->connector); 81 drm_connector_cleanup(&du->connector);
@@ -136,7 +136,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
136 kmap_offset = 0; 136 kmap_offset = 0;
137 kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; 137 kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
138 138
139 ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0); 139 ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
140 if (unlikely(ret != 0)) { 140 if (unlikely(ret != 0)) {
141 DRM_ERROR("reserve failed\n"); 141 DRM_ERROR("reserve failed\n");
142 return -EINVAL; 142 return -EINVAL;
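The ttm_bo_reserve() cleanups in this file (and in vmwgfx_resource.c further down) change the last argument from 0 to NULL. In this kernel generation that parameter is a pointer, to the best of my reading a struct ww_acquire_ctx * reservation ticket, so NULL is the type-correct way to say "no ticket":

        /* Assumed signature of this era:
         * ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket) */
        ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("reserve failed\n");
                return -EINVAL;
        }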
@@ -343,7 +343,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
343 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; 343 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
344 kmap_num = (64*64*4) >> PAGE_SHIFT; 344 kmap_num = (64*64*4) >> PAGE_SHIFT;
345 345
346 ret = ttm_bo_reserve(bo, true, false, false, 0); 346 ret = ttm_bo_reserve(bo, true, false, false, NULL);
347 if (unlikely(ret != 0)) { 347 if (unlikely(ret != 0)) {
348 DRM_ERROR("reserve failed\n"); 348 DRM_ERROR("reserve failed\n");
349 return; 349 return;
@@ -1501,7 +1501,6 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1501{ 1501{
1502 struct drm_vmw_cursor_bypass_arg *arg = data; 1502 struct drm_vmw_cursor_bypass_arg *arg = data;
1503 struct vmw_display_unit *du; 1503 struct vmw_display_unit *du;
1504 struct drm_mode_object *obj;
1505 struct drm_crtc *crtc; 1504 struct drm_crtc *crtc;
1506 int ret = 0; 1505 int ret = 0;
1507 1506
@@ -1519,13 +1518,12 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1519 return 0; 1518 return 0;
1520 } 1519 }
1521 1520
1522 obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); 1521 crtc = drm_crtc_find(dev, arg->crtc_id);
1523 if (!obj) { 1522 if (!crtc) {
1524 ret = -ENOENT; 1523 ret = -ENOENT;
1525 goto out; 1524 goto out;
1526 } 1525 }
1527 1526
1528 crtc = obj_to_crtc(obj);
1529 du = vmw_crtc_to_du(crtc); 1527 du = vmw_crtc_to_du(crtc);
1530 1528
1531 du->hotspot_x = arg->xhot; 1529 du->hotspot_x = arg->xhot;
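The cursor-bypass ioctl above also drops the generic object lookup plus obj_to_crtc() cast in favour of the typed drm_crtc_find() helper; as far as this series shows, the helper performs the same DRM_MODE_OBJECT_CRTC lookup internally. Side by side:

        /* Old, two-step form (removed above): */
        obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj)
                return -ENOENT;
        crtc = obj_to_crtc(obj);

        /* New, typed form: */
        crtc = drm_crtc_find(dev, arg->crtc_id);
        if (!crtc)
                return -ENOENT;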
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b2b9bd23aeee..15e185ae4c99 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -371,7 +371,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
371 encoder->possible_crtcs = (1 << unit); 371 encoder->possible_crtcs = (1 << unit);
372 encoder->possible_clones = 0; 372 encoder->possible_clones = 0;
373 373
374 (void) drm_sysfs_connector_add(connector); 374 (void) drm_connector_register(connector);
375 375
376 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); 376 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
377 377
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 01d68f0a69dc..a432c0db257c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -127,7 +127,7 @@ static void vmw_resource_release(struct kref *kref)
127 if (res->backup) { 127 if (res->backup) {
128 struct ttm_buffer_object *bo = &res->backup->base; 128 struct ttm_buffer_object *bo = &res->backup->base;
129 129
130 ttm_bo_reserve(bo, false, false, false, 0); 130 ttm_bo_reserve(bo, false, false, false, NULL);
131 if (!list_empty(&res->mob_head) && 131 if (!list_empty(&res->mob_head) &&
132 res->func->unbind != NULL) { 132 res->func->unbind != NULL) {
133 struct ttm_validate_buffer val_buf; 133 struct ttm_validate_buffer val_buf;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index a95d3a0cabe4..b295463a60b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -467,7 +467,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
467 encoder->possible_crtcs = (1 << unit); 467 encoder->possible_crtcs = (1 << unit);
468 encoder->possible_clones = 0; 468 encoder->possible_clones = 0;
469 469
470 (void) drm_sysfs_connector_add(connector); 470 (void) drm_connector_register(connector);
471 471
472 drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); 472 drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
473 473
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index c1559eeaffe9..8719fb3cccc9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,8 +29,6 @@
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "ttm/ttm_placement.h" 30#include "ttm/ttm_placement.h"
31 31
32#define VMW_COMPAT_SHADER_HT_ORDER 12
33
34struct vmw_shader { 32struct vmw_shader {
35 struct vmw_resource res; 33 struct vmw_resource res;
36 SVGA3dShaderType type; 34 SVGA3dShaderType type;
@@ -42,49 +40,8 @@ struct vmw_user_shader {
42 struct vmw_shader shader; 40 struct vmw_shader shader;
43}; 41};
44 42
45/** 43static uint64_t vmw_user_shader_size;
46 * enum vmw_compat_shader_state - Staging state for compat shaders 44static uint64_t vmw_shader_size;
47 */
48enum vmw_compat_shader_state {
49 VMW_COMPAT_COMMITED,
50 VMW_COMPAT_ADD,
51 VMW_COMPAT_DEL
52};
53
54/**
55 * struct vmw_compat_shader - Metadata for compat shaders.
56 *
57 * @handle: The TTM handle of the guest backed shader.
58 * @tfile: The struct ttm_object_file the guest backed shader is registered
59 * with.
60 * @hash: Hash item for lookup.
61 * @head: List head for staging lists or the compat shader manager list.
62 * @state: Staging state.
63 *
64 * The structure is protected by the cmdbuf lock.
65 */
66struct vmw_compat_shader {
67 u32 handle;
68 struct ttm_object_file *tfile;
69 struct drm_hash_item hash;
70 struct list_head head;
71 enum vmw_compat_shader_state state;
72};
73
74/**
75 * struct vmw_compat_shader_manager - Compat shader manager.
76 *
77 * @shaders: Hash table containing staged and commited compat shaders
78 * @list: List of commited shaders.
79 * @dev_priv: Pointer to a device private structure.
80 *
81 * @shaders and @list are protected by the cmdbuf mutex for now.
82 */
83struct vmw_compat_shader_manager {
84 struct drm_open_hash shaders;
85 struct list_head list;
86 struct vmw_private *dev_priv;
87};
88 45
89static void vmw_user_shader_free(struct vmw_resource *res); 46static void vmw_user_shader_free(struct vmw_resource *res);
90static struct vmw_resource * 47static struct vmw_resource *
@@ -98,8 +55,6 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
98 struct ttm_validate_buffer *val_buf); 55 struct ttm_validate_buffer *val_buf);
99static int vmw_gb_shader_destroy(struct vmw_resource *res); 56static int vmw_gb_shader_destroy(struct vmw_resource *res);
100 57
101static uint64_t vmw_user_shader_size;
102
103static const struct vmw_user_resource_conv user_shader_conv = { 58static const struct vmw_user_resource_conv user_shader_conv = {
104 .object_type = VMW_RES_SHADER, 59 .object_type = VMW_RES_SHADER,
105 .base_obj_to_res = vmw_user_shader_base_to_res, 60 .base_obj_to_res = vmw_user_shader_base_to_res,
@@ -347,6 +302,16 @@ static void vmw_user_shader_free(struct vmw_resource *res)
347 vmw_user_shader_size); 302 vmw_user_shader_size);
348} 303}
349 304
305static void vmw_shader_free(struct vmw_resource *res)
306{
307 struct vmw_shader *shader = vmw_res_to_shader(res);
308 struct vmw_private *dev_priv = res->dev_priv;
309
310 kfree(shader);
311 ttm_mem_global_free(vmw_mem_glob(dev_priv),
312 vmw_shader_size);
313}
314
350/** 315/**
351 * This function is called when user space has no more references on the 316 * This function is called when user space has no more references on the
352 * base object. It releases the base-object's reference on the resource object. 317 * base object. It releases the base-object's reference on the resource object.
@@ -371,13 +336,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
371 TTM_REF_USAGE); 336 TTM_REF_USAGE);
372} 337}
373 338
374static int vmw_shader_alloc(struct vmw_private *dev_priv, 339static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
375 struct vmw_dma_buffer *buffer, 340 struct vmw_dma_buffer *buffer,
376 size_t shader_size, 341 size_t shader_size,
377 size_t offset, 342 size_t offset,
378 SVGA3dShaderType shader_type, 343 SVGA3dShaderType shader_type,
379 struct ttm_object_file *tfile, 344 struct ttm_object_file *tfile,
380 u32 *handle) 345 u32 *handle)
381{ 346{
382 struct vmw_user_shader *ushader; 347 struct vmw_user_shader *ushader;
383 struct vmw_resource *res, *tmp; 348 struct vmw_resource *res, *tmp;
@@ -442,6 +407,56 @@ out:
442} 407}
443 408
444 409
410struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
411 struct vmw_dma_buffer *buffer,
412 size_t shader_size,
413 size_t offset,
414 SVGA3dShaderType shader_type)
415{
416 struct vmw_shader *shader;
417 struct vmw_resource *res;
418 int ret;
419
420 /*
421 * Approximate idr memory usage with 128 bytes. It will be limited
422 * by the maximum number of shaders anyway.
423 */
424 if (unlikely(vmw_shader_size == 0))
425 vmw_shader_size =
426 ttm_round_pot(sizeof(struct vmw_shader)) + 128;
427
428 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
429 vmw_shader_size,
430 false, true);
431 if (unlikely(ret != 0)) {
432 if (ret != -ERESTARTSYS)
433 DRM_ERROR("Out of graphics memory for shader "
434 "creation.\n");
435 goto out_err;
436 }
437
438 shader = kzalloc(sizeof(*shader), GFP_KERNEL);
439 if (unlikely(shader == NULL)) {
440 ttm_mem_global_free(vmw_mem_glob(dev_priv),
441 vmw_shader_size);
442 ret = -ENOMEM;
443 goto out_err;
444 }
445
446 res = &shader->res;
447
448 /*
449 * From here on, the destructor takes over resource freeing.
450 */
451 ret = vmw_gb_shader_init(dev_priv, res, shader_size,
452 offset, shader_type, buffer,
453 vmw_shader_free);
454
455out_err:
456 return ret ? ERR_PTR(ret) : res;
457}
458
459
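Note the return convention of the new vmw_shader_alloc() above: it hands back ERR_PTR(ret) on failure rather than a plain errno, so a caller is expected to test the pointer along these lines (the caller scaffolding is hypothetical):

        struct vmw_resource *res;

        res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
        if (IS_ERR(res))
                return PTR_ERR(res);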
445int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 460int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
446 struct drm_file *file_priv) 461 struct drm_file *file_priv)
447{ 462{
@@ -490,8 +505,8 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
490 if (unlikely(ret != 0)) 505 if (unlikely(ret != 0))
491 goto out_bad_arg; 506 goto out_bad_arg;
492 507
493 ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, 508 ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
494 shader_type, tfile, &arg->shader_handle); 509 shader_type, tfile, &arg->shader_handle);
495 510
496 ttm_read_unlock(&dev_priv->reservation_sem); 511 ttm_read_unlock(&dev_priv->reservation_sem);
497out_bad_arg: 512out_bad_arg:
@@ -500,202 +515,83 @@ out_bad_arg:
500} 515}
501 516
502/** 517/**
503 * vmw_compat_shader_lookup - Look up a compat shader 518 * vmw_compat_shader_id_ok - Check whether a compat shader user key and
504 * 519 * shader type are within valid bounds.
505 * @man: Pointer to the compat shader manager.
506 * @shader_type: The shader type, that combined with the user_key identifies
507 * the shader.
508 * @user_key: On entry, this should be a pointer to the user_key.
509 * On successful exit, it will contain the guest-backed shader's TTM handle.
510 * 520 *
511 * Returns 0 on success. Non-zero on failure, in which case the value pointed 521 * @user_key: User space id of the shader.
512 * to by @user_key is unmodified. 522 * @shader_type: Shader type.
513 */
514int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
515 SVGA3dShaderType shader_type,
516 u32 *user_key)
517{
518 struct drm_hash_item *hash;
519 int ret;
520 unsigned long key = *user_key | (shader_type << 24);
521
522 ret = drm_ht_find_item(&man->shaders, key, &hash);
523 if (unlikely(ret != 0))
524 return ret;
525
526 *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
527 hash)->handle;
528
529 return 0;
530}
531
532/**
533 * vmw_compat_shader_free - Free a compat shader.
534 *
535 * @man: Pointer to the compat shader manager.
536 * @entry: Pointer to a struct vmw_compat_shader.
537 *
538 * Frees a struct vmw_compat_shder entry and drops its reference to the
539 * guest backed shader.
540 */
541static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
542 struct vmw_compat_shader *entry)
543{
544 list_del(&entry->head);
545 WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
546 WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
547 TTM_REF_USAGE));
548 kfree(entry);
549}
550
551/**
552 * vmw_compat_shaders_commit - Commit a list of compat shader actions.
553 *
554 * @man: Pointer to the compat shader manager.
555 * @list: Caller's list of compat shader actions.
556 * 523 *
557 * This function commits a list of compat shader additions or removals. 524 * Returns true if valid, false if not.
558 * It is typically called when the execbuf ioctl call triggering these
559 * actions has commited the fifo contents to the device.
560 */ 525 */
561void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, 526static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
562 struct list_head *list)
563{ 527{
564 struct vmw_compat_shader *entry, *next; 528 return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
565
566 list_for_each_entry_safe(entry, next, list, head) {
567 list_del(&entry->head);
568 switch (entry->state) {
569 case VMW_COMPAT_ADD:
570 entry->state = VMW_COMPAT_COMMITED;
571 list_add_tail(&entry->head, &man->list);
572 break;
573 case VMW_COMPAT_DEL:
574 ttm_ref_object_base_unref(entry->tfile, entry->handle,
575 TTM_REF_USAGE);
576 kfree(entry);
577 break;
578 default:
579 BUG();
580 break;
581 }
582 }
583} 529}
584 530
585/** 531/**
586 * vmw_compat_shaders_revert - Revert a list of compat shader actions 532 * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
587 * 533 *
588 * @man: Pointer to the compat shader manager. 534 * @user_key: User space id of the shader.
589 * @list: Caller's list of compat shader actions. 535 * @shader_type: Shader type.
590 * 536 *
591 * This function reverts a list of compat shader additions or removals. 537 * Returns a hash key suitable for a command buffer managed resource
592 * It is typically called when the execbuf ioctl call triggering these 538 * manager hash table.
593 * actions failed for some reason, and the command stream was never
594 * submitted.
595 */ 539 */
596void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, 540static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
597 struct list_head *list)
598{ 541{
599 struct vmw_compat_shader *entry, *next; 542 return user_key | (shader_type << 20);
600 int ret;
601
602 list_for_each_entry_safe(entry, next, list, head) {
603 switch (entry->state) {
604 case VMW_COMPAT_ADD:
605 vmw_compat_shader_free(man, entry);
606 break;
607 case VMW_COMPAT_DEL:
608 ret = drm_ht_insert_item(&man->shaders, &entry->hash);
609 list_del(&entry->head);
610 list_add_tail(&entry->head, &man->list);
611 entry->state = VMW_COMPAT_COMMITED;
612 break;
613 default:
614 BUG();
615 break;
616 }
617 }
618} 543}
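Taken together, the two small helpers above define the new compat-shader namespace: a user key of at most 20 bits and a shader type below 16 are packed into a single 24-bit hash key for the command-buffer resource manager. (The old scheme shifted the type by 24 bits, and its bounds check shader_type > 16 was off by one; the rewrite tightens it to < 16.) A stand-alone model of the packing that compiles as plain C:

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Low 20 bits carry the user key, the next 4 bits the shader type. */
        static int id_ok(uint32_t user_key, uint32_t shader_type)
        {
                return user_key <= ((1u << 20) - 1) && shader_type < 16;
        }

        static uint32_t key(uint32_t user_key, uint32_t shader_type)
        {
                return user_key | (shader_type << 20);
        }

        int main(void)
        {
                assert(id_ok(0xfffff, 15));
                assert(!id_ok(1u << 20, 0));    /* user key out of range */
                printf("key = 0x%06x\n", (unsigned) key(0x00042, 3)); /* 0x300042 */
                return 0;
        }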
619 544
620/** 545/**
621 * vmw_compat_shader_remove - Stage a compat shader for removal. 546 * vmw_compat_shader_remove - Stage a compat shader for removal.
622 * 547 *
623 * @man: Pointer to the compat shader manager 548 * @man: Pointer to the compat shader manager identifying the shader namespace.
624 * @user_key: The key that is used to identify the shader. The key is 549 * @user_key: The key that is used to identify the shader. The key is
625 * unique to the shader type. 550 * unique to the shader type.
626 * @shader_type: Shader type. 551 * @shader_type: Shader type.
627 * @list: Caller's list of staged shader actions. 552 * @list: Caller's list of staged command buffer resource actions.
628 *
629 * This function stages a compat shader for removal and removes the key from
630 * the shader manager's hash table. If the shader was previously only staged
631 * for addition it is completely removed (But the execbuf code may keep a
632 * reference if it was bound to a context between addition and removal). If
633 * it was previously commited to the manager, it is staged for removal.
634 */ 553 */
635int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, 554int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
636 u32 user_key, SVGA3dShaderType shader_type, 555 u32 user_key, SVGA3dShaderType shader_type,
637 struct list_head *list) 556 struct list_head *list)
638{ 557{
639 struct vmw_compat_shader *entry; 558 if (!vmw_compat_shader_id_ok(user_key, shader_type))
640 struct drm_hash_item *hash;
641 int ret;
642
643 ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
644 &hash);
645 if (likely(ret != 0))
646 return -EINVAL; 559 return -EINVAL;
647 560
648 entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); 561 return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
649 562 vmw_compat_shader_key(user_key,
650 switch (entry->state) { 563 shader_type),
651 case VMW_COMPAT_ADD: 564 list);
652 vmw_compat_shader_free(man, entry);
653 break;
654 case VMW_COMPAT_COMMITED:
655 (void) drm_ht_remove_item(&man->shaders, &entry->hash);
656 list_del(&entry->head);
657 entry->state = VMW_COMPAT_DEL;
658 list_add_tail(&entry->head, list);
659 break;
660 default:
661 BUG();
662 break;
663 }
664
665 return 0;
666} 565}
667 566
668/** 567/**
669 * vmw_compat_shader_add - Create a compat shader and add the 568 * vmw_compat_shader_add - Create a compat shader and stage it for addition
670 * key to the manager 569 * as a command buffer managed resource.
671 * 570 *
672 * @man: Pointer to the compat shader manager 571 * @man: Pointer to the compat shader manager identifying the shader namespace.
673 * @user_key: The key that is used to identify the shader. The key is 572 * @user_key: The key that is used to identify the shader. The key is
674 * unique to the shader type. 573 * unique to the shader type.
675 * @bytecode: Pointer to the bytecode of the shader. 574 * @bytecode: Pointer to the bytecode of the shader.
676 * @shader_type: Shader type. 575 * @shader_type: Shader type.
677 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is 576 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
678 * to be created with. 577 * to be created with.
679 * @list: Caller's list of staged shader actions. 578 * @list: Caller's list of staged command buffer resource actions.
680 * 579 *
681 * Note that only the key is added to the shader manager's hash table.
682 * The shader is not yet added to the shader manager's list of shaders.
683 */ 580 */
684int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, 581int vmw_compat_shader_add(struct vmw_private *dev_priv,
582 struct vmw_cmdbuf_res_manager *man,
685 u32 user_key, const void *bytecode, 583 u32 user_key, const void *bytecode,
686 SVGA3dShaderType shader_type, 584 SVGA3dShaderType shader_type,
687 size_t size, 585 size_t size,
688 struct ttm_object_file *tfile,
689 struct list_head *list) 586 struct list_head *list)
690{ 587{
691 struct vmw_dma_buffer *buf; 588 struct vmw_dma_buffer *buf;
692 struct ttm_bo_kmap_obj map; 589 struct ttm_bo_kmap_obj map;
693 bool is_iomem; 590 bool is_iomem;
694 struct vmw_compat_shader *compat;
695 u32 handle;
696 int ret; 591 int ret;
592 struct vmw_resource *res;
697 593
698 if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16) 594 if (!vmw_compat_shader_id_ok(user_key, shader_type))
699 return -EINVAL; 595 return -EINVAL;
700 596
701 /* Allocate and pin a DMA buffer */ 597 /* Allocate and pin a DMA buffer */
@@ -703,7 +599,7 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
703 if (unlikely(buf == NULL)) 599 if (unlikely(buf == NULL))
704 return -ENOMEM; 600 return -ENOMEM;
705 601
706 ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement, 602 ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
707 true, vmw_dmabuf_bo_free); 603 true, vmw_dmabuf_bo_free);
708 if (unlikely(ret != 0)) 604 if (unlikely(ret != 0))
709 goto out; 605 goto out;
@@ -728,84 +624,40 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
728 WARN_ON(ret != 0); 624 WARN_ON(ret != 0);
729 ttm_bo_unreserve(&buf->base); 625 ttm_bo_unreserve(&buf->base);
730 626
731 /* Create a guest-backed shader container backed by the dma buffer */ 627 res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
732 ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
733 tfile, &handle);
734 vmw_dmabuf_unreference(&buf);
735 if (unlikely(ret != 0)) 628 if (unlikely(ret != 0))
736 goto no_reserve; 629 goto no_reserve;
737 /*
738 * Create a compat shader structure and stage it for insertion
739 * in the manager
740 */
741 compat = kzalloc(sizeof(*compat), GFP_KERNEL);
742 if (compat == NULL)
743 goto no_compat;
744
745 compat->hash.key = user_key | (shader_type << 24);
746 ret = drm_ht_insert_item(&man->shaders, &compat->hash);
747 if (unlikely(ret != 0))
748 goto out_invalid_key;
749
750 compat->state = VMW_COMPAT_ADD;
751 compat->handle = handle;
752 compat->tfile = tfile;
753 list_add_tail(&compat->head, list);
754
755 return 0;
756 630
757out_invalid_key: 631 ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
758 kfree(compat); 632 vmw_compat_shader_key(user_key, shader_type),
759no_compat: 633 res, list);
760 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); 634 vmw_resource_unreference(&res);
761no_reserve: 635no_reserve:
636 vmw_dmabuf_unreference(&buf);
762out: 637out:
763 return ret; 638 return ret;
764} 639}
765 640
766/** 641/**
767 * vmw_compat_shader_man_create - Create a compat shader manager 642 * vmw_compat_shader_lookup - Look up a compat shader
768 *
769 * @dev_priv: Pointer to a device private structure.
770 *
771 * Typically done at file open time. If successful returns a pointer to a
772 * compat shader manager. Otherwise returns an error pointer.
773 */
774struct vmw_compat_shader_manager *
775vmw_compat_shader_man_create(struct vmw_private *dev_priv)
776{
777 struct vmw_compat_shader_manager *man;
778 int ret;
779
780 man = kzalloc(sizeof(*man), GFP_KERNEL);
781 if (man == NULL)
782 return ERR_PTR(-ENOMEM);
783
784 man->dev_priv = dev_priv;
785 INIT_LIST_HEAD(&man->list);
786 ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
787 if (ret == 0)
788 return man;
789
790 kfree(man);
791 return ERR_PTR(ret);
792}
793
794/**
795 * vmw_compat_shader_man_destroy - Destroy a compat shader manager
796 * 643 *
797 * @man: Pointer to the shader manager to destroy. 644 * @man: Pointer to the command buffer managed resource manager identifying
645 * the shader namespace.
646 * @user_key: The user space id of the shader.
647 * @shader_type: The shader type.
798 * 648 *
799 * Typically done at file close time. 649 * Returns a refcounted pointer to a struct vmw_resource if the shader was
650 * found. An error pointer otherwise.
800 */ 651 */
801void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man) 652struct vmw_resource *
653vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
654 u32 user_key,
655 SVGA3dShaderType shader_type)
802{ 656{
803 struct vmw_compat_shader *entry, *next; 657 if (!vmw_compat_shader_id_ok(user_key, shader_type))
804 658 return ERR_PTR(-EINVAL);
805 mutex_lock(&man->dev_priv->cmdbuf_mutex);
806 list_for_each_entry_safe(entry, next, &man->list, head)
807 vmw_compat_shader_free(man, entry);
808 659
809 mutex_unlock(&man->dev_priv->cmdbuf_mutex); 660 return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
810 kfree(man); 661 vmw_compat_shader_key(user_key,
662 shader_type));
811} 663}
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 112f27e51bc7..63bd63f3c7df 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -185,16 +185,16 @@ static unsigned int pin_job(struct host1x_job *job)
185 struct sg_table *sgt; 185 struct sg_table *sgt;
186 dma_addr_t phys_addr; 186 dma_addr_t phys_addr;
187 187
188 reloc->target = host1x_bo_get(reloc->target); 188 reloc->target.bo = host1x_bo_get(reloc->target.bo);
189 if (!reloc->target) 189 if (!reloc->target.bo)
190 goto unpin; 190 goto unpin;
191 191
192 phys_addr = host1x_bo_pin(reloc->target, &sgt); 192 phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
193 if (!phys_addr) 193 if (!phys_addr)
194 goto unpin; 194 goto unpin;
195 195
196 job->addr_phys[job->num_unpins] = phys_addr; 196 job->addr_phys[job->num_unpins] = phys_addr;
197 job->unpins[job->num_unpins].bo = reloc->target; 197 job->unpins[job->num_unpins].bo = reloc->target.bo;
198 job->unpins[job->num_unpins].sgt = sgt; 198 job->unpins[job->num_unpins].sgt = sgt;
199 job->num_unpins++; 199 job->num_unpins++;
200 } 200 }
@@ -235,21 +235,21 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
235 for (i = 0; i < job->num_relocs; i++) { 235 for (i = 0; i < job->num_relocs; i++) {
236 struct host1x_reloc *reloc = &job->relocarray[i]; 236 struct host1x_reloc *reloc = &job->relocarray[i];
237 u32 reloc_addr = (job->reloc_addr_phys[i] + 237 u32 reloc_addr = (job->reloc_addr_phys[i] +
238 reloc->target_offset) >> reloc->shift; 238 reloc->target.offset) >> reloc->shift;
239 u32 *target; 239 u32 *target;
240 240
241 /* skip all other gathers */ 241 /* skip all other gathers */
242 if (cmdbuf != reloc->cmdbuf) 242 if (cmdbuf != reloc->cmdbuf.bo)
243 continue; 243 continue;
244 244
245 if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) { 245 if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
246 if (cmdbuf_page_addr) 246 if (cmdbuf_page_addr)
247 host1x_bo_kunmap(cmdbuf, last_page, 247 host1x_bo_kunmap(cmdbuf, last_page,
248 cmdbuf_page_addr); 248 cmdbuf_page_addr);
249 249
250 cmdbuf_page_addr = host1x_bo_kmap(cmdbuf, 250 cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
251 reloc->cmdbuf_offset >> PAGE_SHIFT); 251 reloc->cmdbuf.offset >> PAGE_SHIFT);
252 last_page = reloc->cmdbuf_offset >> PAGE_SHIFT; 252 last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;
253 253
254 if (unlikely(!cmdbuf_page_addr)) { 254 if (unlikely(!cmdbuf_page_addr)) {
255 pr_err("Could not map cmdbuf for relocation\n"); 255 pr_err("Could not map cmdbuf for relocation\n");
@@ -257,7 +257,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
257 } 257 }
258 } 258 }
259 259
260 target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK); 260 target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
261 *target = reloc_addr; 261 *target = reloc_addr;
262 } 262 }
263 263
@@ -272,7 +272,7 @@ static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
272{ 272{
273 offset *= sizeof(u32); 273 offset *= sizeof(u32);
274 274
275 if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset) 275 if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
276 return false; 276 return false;
277 277
278 return true; 278 return true;
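The host1x relocation rework above groups each buffer with its offset: the flat target/target_offset and cmdbuf/cmdbuf_offset members become bo-plus-offset pairs. The struct shape below is inferred purely from the member accesses in these hunks, so treat the exact field types as an assumption:

        struct host1x_reloc {
                struct {
                        struct host1x_bo *bo;   /* was: cmdbuf */
                        unsigned long offset;   /* was: cmdbuf_offset */
                } cmdbuf;
                struct {
                        struct host1x_bo *bo;   /* was: target */
                        unsigned long offset;   /* was: target_offset */
                } target;
                unsigned long shift;
        };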
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index af0259708358..d2077f040f3e 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -237,12 +237,10 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
237 if (conflict->locks & lwants) 237 if (conflict->locks & lwants)
238 return conflict; 238 return conflict;
239 239
240 /* Ok, now check if he owns the resource we want. We don't need 240 /* Ok, now check if it owns the resource we want. We can
241 * to check "decodes" since it should be impossible to own 241 * lock resources that are not decoded, therefore a device
242 * own legacy resources you don't decode unless I have a bug 242 * can own resources it doesn't decode.
243 * in this code...
244 */ 243 */
245 WARN_ON(conflict->owns & ~conflict->decodes);
246 match = lwants & conflict->owns; 244 match = lwants & conflict->owns;
247 if (!match) 245 if (!match)
248 continue; 246 continue;
@@ -254,13 +252,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
254 flags = 0; 252 flags = 0;
255 pci_bits = 0; 253 pci_bits = 0;
256 254
255 /* If we can't control legacy resources via the bridge, we
256 * also need to disable normal decoding.
257 */
257 if (!conflict->bridge_has_one_vga) { 258 if (!conflict->bridge_has_one_vga) {
258 vga_irq_set_state(conflict, false); 259 if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM)
259 flags |= PCI_VGA_STATE_CHANGE_DECODES;
260 if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
261 pci_bits |= PCI_COMMAND_MEMORY; 260 pci_bits |= PCI_COMMAND_MEMORY;
262 if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 261 if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO)
263 pci_bits |= PCI_COMMAND_IO; 262 pci_bits |= PCI_COMMAND_IO;
263
264 if (pci_bits) {
265 vga_irq_set_state(conflict, false);
266 flags |= PCI_VGA_STATE_CHANGE_DECODES;
267 }
264 } 268 }
265 269
266 if (change_bridge) 270 if (change_bridge)
@@ -268,18 +272,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
268 272
269 pci_set_vga_state(conflict->pdev, false, pci_bits, flags); 273 pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
270 conflict->owns &= ~match; 274 conflict->owns &= ~match;
271 /* If he also owned non-legacy, that is no longer the case */ 275
272 if (match & VGA_RSRC_LEGACY_MEM) 276 /* If we disabled normal decoding, reflect it in owns */
277 if (pci_bits & PCI_COMMAND_MEMORY)
273 conflict->owns &= ~VGA_RSRC_NORMAL_MEM; 278 conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
274 if (match & VGA_RSRC_LEGACY_IO) 279 if (pci_bits & PCI_COMMAND_IO)
275 conflict->owns &= ~VGA_RSRC_NORMAL_IO; 280 conflict->owns &= ~VGA_RSRC_NORMAL_IO;
276 } 281 }
277 282
278enable_them: 283enable_them:
279 /* ok dude, we got it, everybody conflicting has been disabled, let's 284 /* ok dude, we got it, everybody conflicting has been disabled, let's
280 * enable us. Make sure we don't mark a bit in "owns" that we don't 285 * enable us. Mark any bits in "owns" regardless of whether we
281 * also have in "decodes". We can lock resources we don't decode but 286 * decoded them. We can lock resources we don't decode, therefore
282 * not own them. 287 * we must track them via "owns".
283 */ 288 */
284 flags = 0; 289 flags = 0;
285 pci_bits = 0; 290 pci_bits = 0;
@@ -291,7 +296,7 @@ enable_them:
291 if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 296 if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
292 pci_bits |= PCI_COMMAND_IO; 297 pci_bits |= PCI_COMMAND_IO;
293 } 298 }
294 if (!!(wants & VGA_RSRC_LEGACY_MASK)) 299 if (wants & VGA_RSRC_LEGACY_MASK)
295 flags |= PCI_VGA_STATE_CHANGE_BRIDGE; 300 flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
296 301
297 pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); 302 pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
@@ -299,7 +304,7 @@ enable_them:
299 if (!vgadev->bridge_has_one_vga) { 304 if (!vgadev->bridge_has_one_vga) {
300 vga_irq_set_state(vgadev, true); 305 vga_irq_set_state(vgadev, true);
301 } 306 }
302 vgadev->owns |= (wants & vgadev->decodes); 307 vgadev->owns |= wants;
303lock_them: 308lock_them:
304 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); 309 vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
305 if (rsrc & VGA_RSRC_LEGACY_IO) 310 if (rsrc & VGA_RSRC_LEGACY_IO)
@@ -649,7 +654,6 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
649 old_decodes = vgadev->decodes; 654 old_decodes = vgadev->decodes;
650 decodes_removed = ~new_decodes & old_decodes; 655 decodes_removed = ~new_decodes & old_decodes;
651 decodes_unlocked = vgadev->locks & decodes_removed; 656 decodes_unlocked = vgadev->locks & decodes_removed;
652 vgadev->owns &= ~decodes_removed;
653 vgadev->decodes = new_decodes; 657 vgadev->decodes = new_decodes;
654 658
655 pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", 659 pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
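The arbitration change above is subtle: a device may now own and lock legacy ranges it does not decode, so when a conflicting owner is disabled the arbiter only clears PCI command bits for ranges that device actually decodes, masking match with conflict->decodes first. A stand-alone model of that bit computation (the flag values are illustrative stand-ins for the vgaarb and PCI defines):

        #include <stdio.h>

        #define VGA_RSRC_LEGACY_IO      0x01
        #define VGA_RSRC_LEGACY_MEM     0x02
        #define PCI_COMMAND_IO          0x1
        #define PCI_COMMAND_MEMORY      0x2

        static unsigned int pci_bits_for(unsigned int match, unsigned int decodes)
        {
                unsigned int pci_bits = 0;

                if ((match & decodes) & VGA_RSRC_LEGACY_MEM)
                        pci_bits |= PCI_COMMAND_MEMORY;
                if ((match & decodes) & VGA_RSRC_LEGACY_IO)
                        pci_bits |= PCI_COMMAND_IO;
                return pci_bits;
        }

        int main(void)
        {
                /* Owns legacy MEM but does not decode it: nothing to disable. */
                printf("%u\n", pci_bits_for(VGA_RSRC_LEGACY_MEM, 0));
                /* Owns and decodes legacy MEM: memory decoding is turned off. */
                printf("%u\n", pci_bits_for(VGA_RSRC_LEGACY_MEM,
                                            VGA_RSRC_LEGACY_MEM));
                return 0;
        }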
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 47ee6c79857a..6b22106534d8 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -202,7 +202,7 @@ static const struct file_operations imx_drm_driver_fops = {
202 202
203void imx_drm_connector_destroy(struct drm_connector *connector) 203void imx_drm_connector_destroy(struct drm_connector *connector)
204{ 204{
205 drm_sysfs_connector_remove(connector); 205 drm_connector_unregister(connector);
206 drm_connector_cleanup(connector); 206 drm_connector_cleanup(connector);
207} 207}
208EXPORT_SYMBOL_GPL(imx_drm_connector_destroy); 208EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
@@ -293,10 +293,10 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
293 * userspace will expect to be able to access DRM at this point. 293 * userspace will expect to be able to access DRM at this point.
294 */ 294 */
295 list_for_each_entry(connector, &drm->mode_config.connector_list, head) { 295 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
296 ret = drm_sysfs_connector_add(connector); 296 ret = drm_connector_register(connector);
297 if (ret) { 297 if (ret) {
298 dev_err(drm->dev, 298 dev_err(drm->dev,
299 "[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n", 299 "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
300 connector->base.id, 300 connector->base.id,
301 connector->name, ret); 301 connector->name, ret);
302 goto err_unbind; 302 goto err_unbind;
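This is the same mechanical rename seen in the vmwgfx hunks earlier: drm_sysfs_connector_add() and drm_sysfs_connector_remove() become drm_connector_register() and drm_connector_unregister(). The usual pairing with init and cleanup, sketched with an illustrative connector type and error handling:

        ret = drm_connector_init(drm, connector, &funcs,
                                 DRM_MODE_CONNECTOR_HDMIA);
        if (ret)
                return ret;

        ret = drm_connector_register(connector);
        if (ret) {
                drm_connector_cleanup(connector);
                return ret;
        }

        /* Teardown mirrors setup, as imx_drm_connector_destroy() above shows: */
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);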
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e41f17ea1f13..196890735367 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -152,8 +152,6 @@ int drm_err(const char *func, const char *format, ...);
152 also include looping detection. */ 152 also include looping detection. */
153 153
154#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ 154#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
155#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
156#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
157 155
158#define DRM_MAP_HASH_OFFSET 0x10000000 156#define DRM_MAP_HASH_OFFSET 0x10000000
159 157
@@ -347,18 +345,6 @@ struct drm_waitlist {
347 spinlock_t write_lock; 345 spinlock_t write_lock;
348}; 346};
349 347
350struct drm_freelist {
351 int initialized; /**< Freelist in use */
352 atomic_t count; /**< Number of free buffers */
353 struct drm_buf *next; /**< End pointer */
354
355 wait_queue_head_t waiting; /**< Processes waiting on free bufs */
356 int low_mark; /**< Low water mark */
357 int high_mark; /**< High water mark */
358 atomic_t wfh; /**< If waiting for high mark */
359 spinlock_t lock;
360};
361
362typedef struct drm_dma_handle { 348typedef struct drm_dma_handle {
363 dma_addr_t busaddr; 349 dma_addr_t busaddr;
364 void *vaddr; 350 void *vaddr;
@@ -376,7 +362,8 @@ struct drm_buf_entry {
376 int page_order; 362 int page_order;
377 struct drm_dma_handle **seglist; 363 struct drm_dma_handle **seglist;
378 364
379 struct drm_freelist freelist; 365 int low_mark; /**< Low water mark */
366 int high_mark; /**< High water mark */
380}; 367};
381 368
382/* Event queued up for userspace to read */ 369/* Event queued up for userspace to read */
@@ -397,7 +384,6 @@ struct drm_prime_file_private {
397 384
398/** File private data */ 385/** File private data */
399struct drm_file { 386struct drm_file {
400 unsigned always_authenticated :1;
401 unsigned authenticated :1; 387 unsigned authenticated :1;
402 /* Whether we're master for a minor. Protected by master_mutex */ 388 /* Whether we're master for a minor. Protected by master_mutex */
403 unsigned is_master :1; 389 unsigned is_master :1;
@@ -442,23 +428,6 @@ struct drm_file {
442 struct drm_prime_file_private prime; 428 struct drm_prime_file_private prime;
443}; 429};
444 430
445/** Wait queue */
446struct drm_queue {
447 atomic_t use_count; /**< Outstanding uses (+1) */
448 atomic_t finalization; /**< Finalization in progress */
449 atomic_t block_count; /**< Count of processes waiting */
450 atomic_t block_read; /**< Queue blocked for reads */
451 wait_queue_head_t read_queue; /**< Processes waiting on block_read */
452 atomic_t block_write; /**< Queue blocked for writes */
453 wait_queue_head_t write_queue; /**< Processes waiting on block_write */
454 atomic_t total_queued; /**< Total queued statistic */
455 atomic_t total_flushed; /**< Total flushes statistic */
456 atomic_t total_locks; /**< Total locks statistics */
457 enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
458 struct drm_waitlist waitlist; /**< Pending buffers */
459 wait_queue_head_t flush_queue; /**< Processes waiting until flush */
460};
461
462/** 431/**
463 * Lock data. 432 * Lock data.
464 */ 433 */
@@ -567,15 +536,6 @@ struct drm_map_list {
567 struct drm_master *master; 536 struct drm_master *master;
568}; 537};
569 538
570/**
571 * Context handle list
572 */
573struct drm_ctx_list {
574 struct list_head head; /**< list head */
575 drm_context_t handle; /**< context handle */
576 struct drm_file *tag; /**< associated fd private data */
577};
578
579/* location of GART table */ 539/* location of GART table */
580#define DRM_ATI_GART_MAIN 1 540#define DRM_ATI_GART_MAIN 1
581#define DRM_ATI_GART_FB 2 541#define DRM_ATI_GART_FB 2
@@ -1218,7 +1178,6 @@ extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
1218 /* Device support (drm_fops.h) */ 1178 /* Device support (drm_fops.h) */
1219extern struct mutex drm_global_mutex; 1179extern struct mutex drm_global_mutex;
1220extern int drm_open(struct inode *inode, struct file *filp); 1180extern int drm_open(struct inode *inode, struct file *filp);
1221extern int drm_stub_open(struct inode *inode, struct file *filp);
1222extern ssize_t drm_read(struct file *filp, char __user *buffer, 1181extern ssize_t drm_read(struct file *filp, char __user *buffer,
1223 size_t count, loff_t *offset); 1182 size_t count, loff_t *offset);
1224extern int drm_release(struct inode *inode, struct file *filp); 1183extern int drm_release(struct inode *inode, struct file *filp);
@@ -1256,29 +1215,6 @@ extern int drm_setversion(struct drm_device *dev, void *data,
1256extern int drm_noop(struct drm_device *dev, void *data, 1215extern int drm_noop(struct drm_device *dev, void *data,
1257 struct drm_file *file_priv); 1216 struct drm_file *file_priv);
1258 1217
1259 /* Context IOCTL support (drm_context.h) */
1260extern int drm_resctx(struct drm_device *dev, void *data,
1261 struct drm_file *file_priv);
1262extern int drm_addctx(struct drm_device *dev, void *data,
1263 struct drm_file *file_priv);
1264extern int drm_getctx(struct drm_device *dev, void *data,
1265 struct drm_file *file_priv);
1266extern int drm_switchctx(struct drm_device *dev, void *data,
1267 struct drm_file *file_priv);
1268extern int drm_newctx(struct drm_device *dev, void *data,
1269 struct drm_file *file_priv);
1270extern int drm_rmctx(struct drm_device *dev, void *data,
1271 struct drm_file *file_priv);
1272
1273extern int drm_ctxbitmap_init(struct drm_device *dev);
1274extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
1275extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
1276
1277extern int drm_setsareactx(struct drm_device *dev, void *data,
1278 struct drm_file *file_priv);
1279extern int drm_getsareactx(struct drm_device *dev, void *data,
1280 struct drm_file *file_priv);
1281
1282 /* Authentication IOCTL support (drm_auth.h) */ 1218 /* Authentication IOCTL support (drm_auth.h) */
1283extern int drm_getmagic(struct drm_device *dev, void *data, 1219extern int drm_getmagic(struct drm_device *dev, void *data,
1284 struct drm_file *file_priv); 1220 struct drm_file *file_priv);
@@ -1398,17 +1334,12 @@ extern void drm_master_put(struct drm_master **master);
1398extern void drm_put_dev(struct drm_device *dev); 1334extern void drm_put_dev(struct drm_device *dev);
1399extern void drm_unplug_dev(struct drm_device *dev); 1335extern void drm_unplug_dev(struct drm_device *dev);
1400extern unsigned int drm_debug; 1336extern unsigned int drm_debug;
1401extern unsigned int drm_rnodes;
1402extern unsigned int drm_universal_planes;
1403 1337
1404extern unsigned int drm_vblank_offdelay; 1338extern unsigned int drm_vblank_offdelay;
1405extern unsigned int drm_timestamp_precision; 1339extern unsigned int drm_timestamp_precision;
1406extern unsigned int drm_timestamp_monotonic; 1340extern unsigned int drm_timestamp_monotonic;
1407 1341
1408extern struct class *drm_class; 1342extern struct class *drm_class;
1409extern struct dentry *drm_debugfs_root;
1410
1411extern struct idr drm_minors_idr;
1412 1343
1413extern struct drm_local_map *drm_getsarea(struct drm_device *dev); 1344extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
1414 1345
@@ -1422,6 +1353,8 @@ extern int drm_debugfs_create_files(const struct drm_info_list *files,
1422extern int drm_debugfs_remove_files(const struct drm_info_list *files, 1353extern int drm_debugfs_remove_files(const struct drm_info_list *files,
1423 int count, struct drm_minor *minor); 1354 int count, struct drm_minor *minor);
1424extern int drm_debugfs_cleanup(struct drm_minor *minor); 1355extern int drm_debugfs_cleanup(struct drm_minor *minor);
1356extern int drm_debugfs_connector_add(struct drm_connector *connector);
1357extern void drm_debugfs_connector_remove(struct drm_connector *connector);
1425#else 1358#else
1426static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id, 1359static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
1427 struct dentry *root) 1360 struct dentry *root)
@@ -1446,6 +1379,15 @@ static inline int drm_debugfs_cleanup(struct drm_minor *minor)
1446{ 1379{
1447 return 0; 1380 return 0;
1448} 1381}
1382
1383static inline int drm_debugfs_connector_add(struct drm_connector *connector)
1384{
1385 return 0;
1386}
1387static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
1388{
1389}
1390
1449#endif 1391#endif
1450 1392
1451 /* Info file support */ 1393 /* Info file support */
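The new drm_debugfs_connector_add()/_remove() declarations follow the standard compile-out idiom: real prototypes when debugfs support is built in, no-op static inlines otherwise, so call sites need no #ifdef guards. Reduced to its shape (assuming the surrounding block is guarded by CONFIG_DEBUG_FS, as is conventional for this header):

        #if defined(CONFIG_DEBUG_FS)
        int drm_debugfs_connector_add(struct drm_connector *connector);
        #else
        static inline int drm_debugfs_connector_add(struct drm_connector *connector)
        {
                return 0;       /* nothing to register; report success */
        }
        #endif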
@@ -1515,9 +1457,8 @@ extern int drm_pci_set_unique(struct drm_device *dev,
1515struct drm_sysfs_class; 1457struct drm_sysfs_class;
1516extern struct class *drm_sysfs_create(struct module *owner, char *name); 1458extern struct class *drm_sysfs_create(struct module *owner, char *name);
1517extern void drm_sysfs_destroy(void); 1459extern void drm_sysfs_destroy(void);
1518extern int drm_sysfs_device_add(struct drm_minor *minor); 1460extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
1519extern void drm_sysfs_hotplug_event(struct drm_device *dev); 1461extern void drm_sysfs_hotplug_event(struct drm_device *dev);
1520extern void drm_sysfs_device_remove(struct drm_minor *minor);
1521extern int drm_sysfs_connector_add(struct drm_connector *connector); 1462extern int drm_sysfs_connector_add(struct drm_connector *connector);
1522extern void drm_sysfs_connector_remove(struct drm_connector *connector); 1463extern void drm_sysfs_connector_remove(struct drm_connector *connector);
1523 1464
@@ -1577,7 +1518,7 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
1577int drm_gem_create_mmap_offset(struct drm_gem_object *obj); 1518int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
1578int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); 1519int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
1579 1520
1580struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); 1521struct page **drm_gem_get_pages(struct drm_gem_object *obj);
1581void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, 1522void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
1582 bool dirty, bool accessed); 1523 bool dirty, bool accessed);
1583 1524
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 251b75e6bf7a..f1105d0da059 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -41,6 +41,7 @@ struct drm_framebuffer;
41struct drm_object_properties; 41struct drm_object_properties;
42struct drm_file; 42struct drm_file;
43struct drm_clip_rect; 43struct drm_clip_rect;
44struct device_node;
44 45
45#define DRM_MODE_OBJECT_CRTC 0xcccccccc 46#define DRM_MODE_OBJECT_CRTC 0xcccccccc
46#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 47#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -75,6 +76,14 @@ static inline uint64_t I642U64(int64_t val)
75 return (uint64_t)*((uint64_t *)&val); 76 return (uint64_t)*((uint64_t *)&val);
76} 77}
77 78
79/* rotation property bits */
80#define DRM_ROTATE_0 0
81#define DRM_ROTATE_90 1
82#define DRM_ROTATE_180 2
83#define DRM_ROTATE_270 3
84#define DRM_REFLECT_X 4
85#define DRM_REFLECT_Y 5
86
78enum drm_connector_force { 87enum drm_connector_force {
79 DRM_FORCE_UNSPECIFIED, 88 DRM_FORCE_UNSPECIFIED,
80 DRM_FORCE_OFF, 89 DRM_FORCE_OFF,
@@ -314,6 +323,7 @@ struct drm_crtc_funcs {
314 */ 323 */
315struct drm_crtc { 324struct drm_crtc {
316 struct drm_device *dev; 325 struct drm_device *dev;
326 struct device_node *port;
317 struct list_head head; 327 struct list_head head;
318 328
319 /** 329 /**
@@ -331,6 +341,10 @@ struct drm_crtc {
331 struct drm_plane *primary; 341 struct drm_plane *primary;
332 struct drm_plane *cursor; 342 struct drm_plane *cursor;
333 343
344 /* position of cursor plane on crtc */
345 int cursor_x;
346 int cursor_y;
347
334 /* Temporary tracking of the old fb while a modeset is ongoing. Used 348 /* Temporary tracking of the old fb while a modeset is ongoing. Used
335 * by drm_mode_set_config_internal to implement correct refcounting. */ 349 * by drm_mode_set_config_internal to implement correct refcounting. */
336 struct drm_framebuffer *old_fb; 350 struct drm_framebuffer *old_fb;
@@ -524,6 +538,8 @@ struct drm_connector {
524 struct drm_property_blob *edid_blob_ptr; 538 struct drm_property_blob *edid_blob_ptr;
525 struct drm_object_properties properties; 539 struct drm_object_properties properties;
526 540
541 struct drm_property_blob *path_blob_ptr;
542
527 uint8_t polled; /* DRM_CONNECTOR_POLL_* */ 543 uint8_t polled; /* DRM_CONNECTOR_POLL_* */
528 544
529 /* requested DPMS state */ 545 /* requested DPMS state */
@@ -533,6 +549,7 @@ struct drm_connector {
533 549
534 /* forced on connector */ 550 /* forced on connector */
535 enum drm_connector_force force; 551 enum drm_connector_force force;
552 bool override_edid;
536 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; 553 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
537 struct drm_encoder *encoder; /* currently active encoder */ 554 struct drm_encoder *encoder; /* currently active encoder */
538 555
@@ -545,6 +562,8 @@ struct drm_connector {
545 int audio_latency[2]; 562 int audio_latency[2];
546 int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ 563 int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
547 unsigned bad_edid_counter; 564 unsigned bad_edid_counter;
565
566 struct dentry *debugfs_entry;
548}; 567};
549 568
550/** 569/**
@@ -800,6 +819,7 @@ struct drm_mode_config {
800 struct list_head property_blob_list; 819 struct list_head property_blob_list;
801 struct drm_property *edid_property; 820 struct drm_property *edid_property;
802 struct drm_property *dpms_property; 821 struct drm_property *dpms_property;
822 struct drm_property *path_property;
803 struct drm_property *plane_type_property; 823 struct drm_property *plane_type_property;
804 824
805 /* DVI-I properties */ 825 /* DVI-I properties */
@@ -823,6 +843,7 @@ struct drm_mode_config {
823 843
824 /* Optional properties */ 844 /* Optional properties */
825 struct drm_property *scaling_mode_property; 845 struct drm_property *scaling_mode_property;
846 struct drm_property *aspect_ratio_property;
826 struct drm_property *dirty_info_property; 847 struct drm_property *dirty_info_property;
827 848
828 /* dumb ioctl parameters */ 849 /* dumb ioctl parameters */
@@ -852,7 +873,7 @@ struct drm_prop_enum_list {
852extern int drm_crtc_init_with_planes(struct drm_device *dev, 873extern int drm_crtc_init_with_planes(struct drm_device *dev,
853 struct drm_crtc *crtc, 874 struct drm_crtc *crtc,
854 struct drm_plane *primary, 875 struct drm_plane *primary,
855 void *cursor, 876 struct drm_plane *cursor,
856 const struct drm_crtc_funcs *funcs); 877 const struct drm_crtc_funcs *funcs);
857extern int drm_crtc_init(struct drm_device *dev, 878extern int drm_crtc_init(struct drm_device *dev,
858 struct drm_crtc *crtc, 879 struct drm_crtc *crtc,
@@ -878,6 +899,8 @@ extern int drm_connector_init(struct drm_device *dev,
878 struct drm_connector *connector, 899 struct drm_connector *connector,
879 const struct drm_connector_funcs *funcs, 900 const struct drm_connector_funcs *funcs,
880 int connector_type); 901 int connector_type);
902int drm_connector_register(struct drm_connector *connector);
903void drm_connector_unregister(struct drm_connector *connector);
881 904
882extern void drm_connector_cleanup(struct drm_connector *connector); 905extern void drm_connector_cleanup(struct drm_connector *connector);
883/* helper to unplug all connectors from sysfs for device */ 906/* helper to unplug all connectors from sysfs for device */
@@ -937,6 +960,7 @@ extern const char *drm_get_tv_select_name(int val);
937extern void drm_fb_release(struct drm_file *file_priv); 960extern void drm_fb_release(struct drm_file *file_priv);
938extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); 961extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
939extern void drm_mode_group_destroy(struct drm_mode_group *group); 962extern void drm_mode_group_destroy(struct drm_mode_group *group);
963extern void drm_reinit_primary_mode_group(struct drm_device *dev);
940extern bool drm_probe_ddc(struct i2c_adapter *adapter); 964extern bool drm_probe_ddc(struct i2c_adapter *adapter);
941extern struct edid *drm_get_edid(struct drm_connector *connector, 965extern struct edid *drm_get_edid(struct drm_connector *connector,
942 struct i2c_adapter *adapter); 966 struct i2c_adapter *adapter);
@@ -946,6 +970,8 @@ extern void drm_mode_config_init(struct drm_device *dev);
946extern void drm_mode_config_reset(struct drm_device *dev); 970extern void drm_mode_config_reset(struct drm_device *dev);
947extern void drm_mode_config_cleanup(struct drm_device *dev); 971extern void drm_mode_config_cleanup(struct drm_device *dev);
948 972
973extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
974 char *path);
949extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, 975extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
950 struct edid *edid); 976 struct edid *edid);
951 977
@@ -994,7 +1020,8 @@ extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int
994struct drm_property *drm_property_create_bitmask(struct drm_device *dev, 1020struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
995 int flags, const char *name, 1021 int flags, const char *name,
996 const struct drm_prop_enum_list *props, 1022 const struct drm_prop_enum_list *props,
997 int num_values); 1023 int num_props,
1024 uint64_t supported_bits);
998struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, 1025struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
999 const char *name, 1026 const char *name,
1000 uint64_t min, uint64_t max); 1027 uint64_t min, uint64_t max);
@@ -1010,6 +1037,7 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
1010extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, 1037extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
1011 char *formats[]); 1038 char *formats[]);
1012extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); 1039extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
1040extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
1013extern int drm_mode_create_dirty_info_property(struct drm_device *dev); 1041extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
1014 1042
1015extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, 1043extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -1100,6 +1128,10 @@ extern int drm_format_plane_cpp(uint32_t format, int plane);
1100extern int drm_format_horz_chroma_subsampling(uint32_t format); 1128extern int drm_format_horz_chroma_subsampling(uint32_t format);
1101extern int drm_format_vert_chroma_subsampling(uint32_t format); 1129extern int drm_format_vert_chroma_subsampling(uint32_t format);
1102extern const char *drm_get_format_name(uint32_t format); 1130extern const char *drm_get_format_name(uint32_t format);
1131extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
1132 unsigned int supported_rotations);
1133extern unsigned int drm_rotation_simplify(unsigned int rotation,
1134 unsigned int supported_rotations);
1103 1135
1104/* Helpers */ 1136/* Helpers */
1105 1137
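The last hunk above exports the new rotation helpers; together with the DRM_ROTATE_/DRM_REFLECT_ bit positions added earlier in this header, a driver advertises the transforms it supports as a bitmask. A hedged usage sketch (the plane, the chosen rotations, and the initial value are illustrative; BIT() is the usual kernel bit macro):

        struct drm_property *prop;

        prop = drm_mode_create_rotation_property(dev,
                                                 BIT(DRM_ROTATE_0) |
                                                 BIT(DRM_ROTATE_180));
        if (prop)
                drm_object_attach_property(&plane->base, prop,
                                           BIT(DRM_ROTATE_0));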
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
new file mode 100644
index 000000000000..9b446ada2532
--- /dev/null
+++ b/include/drm/drm_dp_mst_helper.h
@@ -0,0 +1,509 @@
1/*
2 * Copyright © 2014 Red Hat.
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22#ifndef _DRM_DP_MST_HELPER_H_
23#define _DRM_DP_MST_HELPER_H_
24
25#include <linux/types.h>
26#include <drm/drm_dp_helper.h>
27
28struct drm_dp_mst_branch;
29
30/**
31 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
32 * @vcpi: Virtual channel ID.
33 * @pbn: Payload Bandwidth Number for this channel
34 * @aligned_pbn: PBN aligned with slot size
35 * @num_slots: number of slots for this PBN
36 */
37struct drm_dp_vcpi {
38 int vcpi;
39 int pbn;
40 int aligned_pbn;
41 int num_slots;
42};
43
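The relationship between @pbn and @num_slots is not spelled out here; an illustrative sketch, assuming the divisor is the manager's pbn_div field (see struct drm_dp_mst_topology_mgr below):

/* Illustrative only: a VCPI needs enough time slots to carry its
 * Payload Bandwidth Number, i.e. pbn rounded up to whole slots. */
static int my_vcpi_slots(int pbn, int pbn_div)
{
	return DIV_ROUND_UP(pbn, pbn_div);	/* from linux/kernel.h */
}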
44/**
45 * struct drm_dp_mst_port - MST port
46 * @kref: reference count for this port.
47 * @guid_valid: whether the GUID of the DP 1.2 device on this port has been validated.
48 * @guid: guid for DP 1.2 device on this port.
49 * @port_num: port number
50 * @input: if this port is an input port.
51 * @mcs: message capability status - DP 1.2 spec.
52 * @ddps: DisplayPort Device Plug Status - DP 1.2
53 * @pdt: Peer Device Type
54 * @ldps: Legacy Device Plug Status
55 * @dpcd_rev: DPCD revision of device on this port
56 * @num_sdp_streams: Number of simultaneous streams
57 * @num_sdp_stream_sinks: Number of stream sinks
58 * @available_pbn: Available bandwidth for this port.
59 * @next: link to next port on this branch device
60 * @mstb: branch device attached below this port
61 * @aux: i2c aux transport to talk to the device connected to this port.
62 * @parent: branch device parent of this port
63 * @vcpi: Virtual Channel Payload info for this port.
64 * @connector: DRM connector this port is connected to.
65 * @mgr: topology manager this port lives under.
66 *
67 * This structure represents an MST port endpoint on a device somewhere
68 * in the MST topology.
69 */
70struct drm_dp_mst_port {
71 struct kref kref;
72
73 /* if dpcd 1.2 device is on this port - its GUID info */
74 bool guid_valid;
75 u8 guid[16];
76
77 u8 port_num;
78 bool input;
79 bool mcs;
80 bool ddps;
81 u8 pdt;
82 bool ldps;
83 u8 dpcd_rev;
84 u8 num_sdp_streams;
85 u8 num_sdp_stream_sinks;
86 uint16_t available_pbn;
87 struct list_head next;
88 struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
89 struct drm_dp_aux aux; /* i2c aux transport for this port */
90 struct drm_dp_mst_branch *parent;
91
92 struct drm_dp_vcpi vcpi;
93 struct drm_connector *connector;
94 struct drm_dp_mst_topology_mgr *mgr;
95};
96
97/**
98 * struct drm_dp_mst_branch - MST branch device.
99 * @kref: reference count for this branch device.
100 * @rad: Relative Address to talk to this branch device.
101 * @lct: Link count total to talk to this branch device.
102 * @num_ports: number of ports on the branch.
103 * @msg_slots: one bit per transmitted msg slot.
104 * @ports: linked list of ports on this branch.
105 * @port_parent: pointer to the parent port, NULL if top-level.
106 * @mgr: topology manager for this branch device.
107 * @tx_slots: transmission slots for this device.
108 * @last_seqno: last sequence number used to talk to this device.
109 * @link_address_sent: whether a link address message has been sent to this device yet.
110 *
111 * This structure represents an MST branch device. There is one
112 * primary branch device at the root, along with any others connected
113 * to downstream ports.
114 */
115struct drm_dp_mst_branch {
116 struct kref kref;
117 u8 rad[8];
118 u8 lct;
119 int num_ports;
120
121 int msg_slots;
122 struct list_head ports;
123
124 /* list of tx ops queued for this branch device */
125 struct drm_dp_mst_port *port_parent;
126 struct drm_dp_mst_topology_mgr *mgr;
127
128 /* slots are protected by mstb->mgr->qlock */
129 struct drm_dp_sideband_msg_tx *tx_slots[2];
130 int last_seqno;
131 bool link_address_sent;
132};
133
134
135/* sideband msg header - not bit struct */
136struct drm_dp_sideband_msg_hdr {
137 u8 lct;
138 u8 lcr;
139 u8 rad[8];
140 bool broadcast;
141 bool path_msg;
142 u8 msg_len;
143 bool somt;
144 bool eomt;
145 bool seqno;
146};
147
148struct drm_dp_nak_reply {
149 u8 guid[16];
150 u8 reason;
151 u8 nak_data;
152};
153
154struct drm_dp_link_address_ack_reply {
155 u8 guid[16];
156 u8 nports;
157 struct drm_dp_link_addr_reply_port {
158 bool input_port;
159 u8 peer_device_type;
160 u8 port_number;
161 bool mcs;
162 bool ddps;
163 bool legacy_device_plug_status;
164 u8 dpcd_revision;
165 u8 peer_guid[16];
166 u8 num_sdp_streams;
167 u8 num_sdp_stream_sinks;
168 } ports[16];
169};
170
171struct drm_dp_remote_dpcd_read_ack_reply {
172 u8 port_number;
173 u8 num_bytes;
174 u8 bytes[255];
175};
176
177struct drm_dp_remote_dpcd_write_ack_reply {
178 u8 port_number;
179};
180
181struct drm_dp_remote_dpcd_write_nak_reply {
182 u8 port_number;
183 u8 reason;
184 u8 bytes_written_before_failure;
185};
186
187struct drm_dp_remote_i2c_read_ack_reply {
188 u8 port_number;
189 u8 num_bytes;
190 u8 bytes[255];
191};
192
193struct drm_dp_remote_i2c_read_nak_reply {
194 u8 port_number;
195 u8 nak_reason;
196 u8 i2c_nak_transaction;
197};
198
199struct drm_dp_remote_i2c_write_ack_reply {
200 u8 port_number;
201};
202
203
204struct drm_dp_sideband_msg_rx {
205 u8 chunk[48];
206 u8 msg[256];
207 u8 curchunk_len;
208 u8 curchunk_idx; /* chunk we are parsing now */
209 u8 curchunk_hdrlen;
210 u8 curlen; /* total length of the msg */
211 bool have_somt;
212 bool have_eomt;
213 struct drm_dp_sideband_msg_hdr initial_hdr;
214};
215
216
217struct drm_dp_allocate_payload {
218 u8 port_number;
219 u8 number_sdp_streams;
220 u8 vcpi;
221 u16 pbn;
222 u8 sdp_stream_sink[8];
223};
224
225struct drm_dp_allocate_payload_ack_reply {
226 u8 port_number;
227 u8 vcpi;
228 u16 allocated_pbn;
229};
230
231struct drm_dp_connection_status_notify {
232 u8 guid[16];
233 u8 port_number;
234 bool legacy_device_plug_status;
235 bool displayport_device_plug_status;
236 bool message_capability_status;
237 bool input_port;
238 u8 peer_device_type;
239};
240
241struct drm_dp_remote_dpcd_read {
242 u8 port_number;
243 u32 dpcd_address;
244 u8 num_bytes;
245};
246
247struct drm_dp_remote_dpcd_write {
248 u8 port_number;
249 u32 dpcd_address;
250 u8 num_bytes;
251 u8 *bytes;
252};
253
254struct drm_dp_remote_i2c_read {
255 u8 num_transactions;
256 u8 port_number;
257 struct {
258 u8 i2c_dev_id;
259 u8 num_bytes;
260 u8 *bytes;
261 u8 no_stop_bit;
262 u8 i2c_transaction_delay;
263 } transactions[4];
264 u8 read_i2c_device_id;
265 u8 num_bytes_read;
266};
267
268struct drm_dp_remote_i2c_write {
269 u8 port_number;
270 u8 write_i2c_device_id;
271 u8 num_bytes;
272 u8 *bytes;
273};
274
275/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
276struct drm_dp_port_number_req {
277 u8 port_number;
278};
279
280struct drm_dp_enum_path_resources_ack_reply {
281 u8 port_number;
282 u16 full_payload_bw_number;
283 u16 avail_payload_bw_number;
284};
285
286/* covers POWER_DOWN_PHY, POWER_UP_PHY */
287struct drm_dp_port_number_rep {
288 u8 port_number;
289};
290
291struct drm_dp_query_payload {
292 u8 port_number;
293 u8 vcpi;
294};
295
296struct drm_dp_resource_status_notify {
297 u8 port_number;
298 u8 guid[16];
299 u16 available_pbn;
300};
301
302struct drm_dp_query_payload_ack_reply {
303 u8 port_number;
304 u8 allocated_pbn;
305};
306
307struct drm_dp_sideband_msg_req_body {
308 u8 req_type;
309 union ack_req {
310 struct drm_dp_connection_status_notify conn_stat;
311 struct drm_dp_port_number_req port_num;
312 struct drm_dp_resource_status_notify resource_stat;
313
314 struct drm_dp_query_payload query_payload;
315 struct drm_dp_allocate_payload allocate_payload;
316
317 struct drm_dp_remote_dpcd_read dpcd_read;
318 struct drm_dp_remote_dpcd_write dpcd_write;
319
320 struct drm_dp_remote_i2c_read i2c_read;
321 struct drm_dp_remote_i2c_write i2c_write;
322 } u;
323};
324
325struct drm_dp_sideband_msg_reply_body {
326 u8 reply_type;
327 u8 req_type;
328 union ack_replies {
329 struct drm_dp_nak_reply nak;
330 struct drm_dp_link_address_ack_reply link_addr;
331 struct drm_dp_port_number_rep port_number;
332
333 struct drm_dp_enum_path_resources_ack_reply path_resources;
334 struct drm_dp_allocate_payload_ack_reply allocate_payload;
335 struct drm_dp_query_payload_ack_reply query_payload;
336
337 struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
338 struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
339 struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
340
341 struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
342 struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
343 struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
344 } u;
345};
346
347/* msg is queued to be put into a slot */
348#define DRM_DP_SIDEBAND_TX_QUEUED 0
349/* msg has started transmitting on a slot - still on msgq */
350#define DRM_DP_SIDEBAND_TX_START_SEND 1
351/* msg has finished transmitting on a slot - removed from msgq, tracked only in its slot */
352#define DRM_DP_SIDEBAND_TX_SENT 2
353/* msg has received a response - removed from slot */
354#define DRM_DP_SIDEBAND_TX_RX 3
355#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
356
357struct drm_dp_sideband_msg_tx {
358 u8 msg[256];
359 u8 chunk[48];
360 u8 cur_offset;
361 u8 cur_len;
362 struct drm_dp_mst_branch *dst;
363 struct list_head next;
364 int seqno;
365 int state;
366 bool path_msg;
367 struct drm_dp_sideband_msg_reply_body reply;
368};
369
370/* sideband msg handler */
371struct drm_dp_mst_topology_mgr;
372struct drm_dp_mst_topology_cbs {
373 /* create a connector for a port */
374 struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path);
375 void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
376 struct drm_connector *connector);
377 void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
378
379};
380
381#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
382
383#define DP_PAYLOAD_LOCAL 1
384#define DP_PAYLOAD_REMOTE 2
385#define DP_PAYLOAD_DELETE_LOCAL 3
386
387struct drm_dp_payload {
388 int payload_state;
389 int start_slot;
390 int num_slots;
391};
392
393/**
394 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
395 * @dev: device pointer for adding i2c devices etc.
396 * @cbs: callbacks for connector addition and destruction.
397 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write in one go.
398 * @aux: aux channel for the DP connector.
399 * @max_payloads: maximum number of payloads the GPU can generate.
400 * @conn_base_id: DRM connector ID this mgr is connected to.
401 * @down_rep_recv: msg receiver state for down replies.
402 * @up_req_recv: msg receiver state for up requests.
403 * @lock: protects mst state, primary, guid, dpcd.
404 * @mst_state: whether this manager is enabled for an MST capable port.
405 * @mst_primary: pointer to the primary branch device.
406 * @guid_valid: GUID valid for the primary branch device.
407 * @guid: GUID for primary port.
408 * @dpcd: cache of DPCD for primary port.
409 * @pbn_div: PBN to slots divisor.
410 *
411 * This struct represents the top-level DisplayPort MST topology manager.
412 * There should be one instance of this for every MST capable DP connector
413 * on the GPU.
414 */
415struct drm_dp_mst_topology_mgr {
416
417 struct device *dev;
418 struct drm_dp_mst_topology_cbs *cbs;
419 int max_dpcd_transaction_bytes;
420 struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
421 int max_payloads;
422 int conn_base_id;
423
424 /* only ever accessed from the workqueue - which should be serialised */
425 struct drm_dp_sideband_msg_rx down_rep_recv;
426 struct drm_dp_sideband_msg_rx up_req_recv;
427
428 /* pointer to info about the initial MST device */
429 struct mutex lock; /* protects mst_state + primary + guid + dpcd */
430
431 bool mst_state;
432 struct drm_dp_mst_branch *mst_primary;
433 /* primary MST device GUID */
434 bool guid_valid;
435 u8 guid[16];
436 u8 dpcd[DP_RECEIVER_CAP_SIZE];
437 u8 sink_count;
438 int pbn_div;
439 int total_slots;
440 int avail_slots;
441 int total_pbn;
442
443 /* messages to be transmitted */
444 /* qlock protects the upq/downq and in_progress,
445 the mstb tx_slots and txmsg->state once they are queued */
446 struct mutex qlock;
447 struct list_head tx_msg_downq;
448 struct list_head tx_msg_upq;
449 bool tx_down_in_progress;
450 bool tx_up_in_progress;
451
452 /* payload info + lock for it */
453 struct mutex payload_lock;
454 struct drm_dp_vcpi **proposed_vcpis;
455 struct drm_dp_payload *payloads;
456 unsigned long payload_mask;
457
458 wait_queue_head_t tx_waitq;
459 struct work_struct work;
460
461 struct work_struct tx_work;
462};
463
464int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
465
466void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
467
468
469int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
470
471
472int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
473
474
475enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
476
477struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
478
479
480int drm_dp_calc_pbn_mode(int clock, int bpp);
481
482
483bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
484
485
486void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
487
488
489void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
490 struct drm_dp_mst_port *port);
491
492
493int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
494 int pbn);
495
496
497int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
498
499
500int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
501
502int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
503
504void drm_dp_mst_dump_topology(struct seq_file *m,
505 struct drm_dp_mst_topology_mgr *mgr);
506
507void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
508int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
509#endif
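Taken together, the declarations above suggest a driver flow along these lines. This is a hedged sketch, not code from the patch: the my_* names and parameter values are hypothetical, the callback bodies are assumed to be supplied by the driver, and error handling is mostly elided.

static struct drm_dp_mst_topology_cbs my_mst_cbs = {
	.add_connector = my_add_connector,		/* hypothetical */
	.destroy_connector = my_destroy_connector,	/* hypothetical */
	.hotplug = my_hotplug,				/* hypothetical */
};

static int my_mst_setup(struct drm_dp_mst_topology_mgr *mgr,
			struct device *dev, struct drm_dp_aux *aux,
			int conn_id)
{
	int ret;

	mgr->cbs = &my_mst_cbs;
	ret = drm_dp_mst_topology_mgr_init(mgr, dev, aux,
					   16 /* max DPCD bytes per op */,
					   4 /* max payloads */, conn_id);
	if (ret)
		return ret;

	/* Flip the sink into MST mode once it advertises support. */
	return drm_dp_mst_topology_mgr_set_mst(mgr, true);
}

/* On modeset: convert the mode's bandwidth needs to PBN, claim VCPI
 * slots, then push the payload table to the branch devices. */
static int my_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_mst_port *port, int clock_khz)
{
	int pbn = drm_dp_calc_pbn_mode(clock_khz, 24 /* bpp */);
	int slots;

	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
		return -EINVAL;

	return drm_dp_update_payload_part1(mgr);
}

drm_dp_update_payload_part2() then finishes the update once drm_dp_check_act_status() reports the allocation change has taken effect; on teardown the driver would undo the allocation via drm_dp_mst_reset_vcpi_slots() and drm_dp_mst_deallocate_vcpi().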
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 7997246d4039..bfd329d613c4 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -86,8 +86,9 @@ struct drm_fb_helper {
86 int crtc_count; 86 int crtc_count;
87 struct drm_fb_helper_crtc *crtc_info; 87 struct drm_fb_helper_crtc *crtc_info;
88 int connector_count; 88 int connector_count;
89 int connector_info_alloc_count;
89 struct drm_fb_helper_connector **connector_info; 90 struct drm_fb_helper_connector **connector_info;
90 struct drm_fb_helper_funcs *funcs; 91 const struct drm_fb_helper_funcs *funcs;
91 struct fb_info *fbdev; 92 struct fb_info *fbdev;
92 u32 pseudo_palette[17]; 93 u32 pseudo_palette[17];
93 struct list_head kernel_fb_list; 94 struct list_head kernel_fb_list;
@@ -97,6 +98,8 @@ struct drm_fb_helper {
97 bool delayed_hotplug; 98 bool delayed_hotplug;
98}; 99};
99 100
101void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
102 const struct drm_fb_helper_funcs *funcs);
100int drm_fb_helper_init(struct drm_device *dev, 103int drm_fb_helper_init(struct drm_device *dev,
101 struct drm_fb_helper *helper, int crtc_count, 104 struct drm_fb_helper *helper, int crtc_count,
102 int max_conn); 105 int max_conn);
@@ -128,4 +131,7 @@ struct drm_display_mode *
128drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, 131drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
129 int width, int height); 132 int width, int height);
130 133
134int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
135int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
136 struct drm_connector *connector);
131#endif 137#endif
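With funcs now const and assigned via the new drm_fb_helper_prepare(), the setup order changes slightly; a hedged sketch under that split (my_fb_probe is a hypothetical callback):

static const struct drm_fb_helper_funcs my_fb_funcs = {
	.fb_probe = my_fb_probe,	/* hypothetical callback */
};

static int my_fbdev_init(struct drm_device *dev,
			 struct drm_fb_helper *helper,
			 int num_crtc, int max_conn)
{
	drm_fb_helper_prepare(dev, helper, &my_fb_funcs);
	return drm_fb_helper_init(dev, helper, num_crtc, max_conn);
}

The new drm_fb_helper_add_one_connector()/drm_fb_helper_remove_one_connector() pair lets MST hotplug grow and shrink the connector set after initialization, which is what the added connector_info_alloc_count field tracks.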
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 944f33f8ba38..2bb55b8b9031 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -94,6 +94,8 @@ void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
94#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8) 94#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8)
95/* disable EoT packets in HS mode */ 95/* disable EoT packets in HS mode */
96#define MIPI_DSI_MODE_EOT_PACKET BIT(9) 96#define MIPI_DSI_MODE_EOT_PACKET BIT(9)
97/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
98#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
97 99
98enum mipi_dsi_pixel_format { 100enum mipi_dsi_pixel_format {
99 MIPI_DSI_FMT_RGB888, 101 MIPI_DSI_FMT_RGB888,
@@ -121,14 +123,17 @@ struct mipi_dsi_device {
121 unsigned long mode_flags; 123 unsigned long mode_flags;
122}; 124};
123 125
124#define to_mipi_dsi_device(d) container_of(d, struct mipi_dsi_device, dev) 126static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
127{
128 return container_of(dev, struct mipi_dsi_device, dev);
129}
125 130
126int mipi_dsi_attach(struct mipi_dsi_device *dsi); 131int mipi_dsi_attach(struct mipi_dsi_device *dsi);
127int mipi_dsi_detach(struct mipi_dsi_device *dsi); 132int mipi_dsi_detach(struct mipi_dsi_device *dsi);
128int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel, 133ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
129 const void *data, size_t len); 134 size_t len);
130ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel, 135ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
131 u8 cmd, void *data, size_t len); 136 size_t len);
132 137
133/** 138/**
134 * struct mipi_dsi_driver - DSI driver 139 * struct mipi_dsi_driver - DSI driver
@@ -144,7 +149,11 @@ struct mipi_dsi_driver {
144 void (*shutdown)(struct mipi_dsi_device *dsi); 149 void (*shutdown)(struct mipi_dsi_device *dsi);
145}; 150};
146 151
147#define to_mipi_dsi_driver(d) container_of(d, struct mipi_dsi_driver, driver) 152static inline struct mipi_dsi_driver *
153to_mipi_dsi_driver(struct device_driver *driver)
154{
155 return container_of(driver, struct mipi_dsi_driver, driver);
156}
148 157
149static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi) 158static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi)
150{ 159{
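Two API changes land in drm_mipi_dsi.h: the upcasting macros become type-checked inline functions, and mipi_dsi_dcs_write() drops its channel argument (the virtual channel now comes from the device) and returns ssize_t. A hedged sketch of a caller under the new signature:

static int my_panel_exit_sleep(struct mipi_dsi_device *dsi)
{
	static const u8 cmd[] = { 0x11 };	/* DCS exit_sleep_mode */
	ssize_t err;

	err = mipi_dsi_dcs_write(dsi, cmd, sizeof(cmd));
	if (err < 0)
		return err;

	return 0;
}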
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
new file mode 100644
index 000000000000..2441f7112074
--- /dev/null
+++ b/include/drm/drm_of.h
@@ -0,0 +1,18 @@
1#ifndef __DRM_OF_H__
2#define __DRM_OF_H__
3
4struct drm_device;
5struct device_node;
6
7#ifdef CONFIG_OF
8extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
9 struct device_node *port);
10#else
11static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
12 struct device_node *port)
13{
14 return 0;
15}
16#endif
17
18#endif /* __DRM_OF_H__ */
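The !CONFIG_OF stub above returns 0, so callers need a fallback; a hedged usage sketch with a hypothetical default of CRTC 0:

static void my_encoder_set_possible_crtcs(struct drm_device *drm,
					  struct drm_encoder *encoder,
					  struct device_node *port)
{
	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, port);
	if (!encoder->possible_crtcs)
		encoder->possible_crtcs = 1;	/* assume CRTC 0 */
}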
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index c2ab77add67c..1fbcc96063a7 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -30,8 +30,42 @@ struct drm_connector;
30struct drm_device; 30struct drm_device;
31struct drm_panel; 31struct drm_panel;
32 32
33/**
34 * struct drm_panel_funcs - perform operations on a given panel
35 * @disable: disable panel (turn off back light, etc.)
36 * @unprepare: turn off panel
37 * @prepare: turn on panel and perform set up
38 * @enable: enable panel (turn on back light, etc.)
39 * @get_modes: add modes to the connector that the panel is attached to and
40 * return the number of modes added
41 *
42 * The .prepare() function is typically called before the display controller
43 * starts to transmit video data. Panel drivers can use this to turn the panel
44 * on and wait for it to become ready. If additional configuration is required
45 * (via a control bus such as I2C, SPI or DSI for example) this is a good time
46 * to do that.
47 *
48 * After the display controller has started transmitting video data, it's safe
49 * to call the .enable() function. This will typically enable the backlight to
50 * make the image on screen visible. Some panels require a certain amount of
51 * time or frames before the image is displayed. This function is responsible
52 * for taking this into account before enabling the backlight to avoid visual
53 * glitches.
54 *
55 * Before stopping video transmission from the display controller it can be
56 * necessary to turn off the panel to avoid visual glitches. This is done in
57 * the .disable() function. Analogously to .enable() this typically involves
58 * turning off the backlight and waiting for some time to make sure no image
59 * is visible on the panel. It is then safe for the display controller to
60 * cease transmission of video data.
61 *
62 * To save power when no video data is transmitted, a driver can power down
63 * the panel. This is the job of the .unprepare() function.
64 */
33struct drm_panel_funcs { 65struct drm_panel_funcs {
34 int (*disable)(struct drm_panel *panel); 66 int (*disable)(struct drm_panel *panel);
67 int (*unprepare)(struct drm_panel *panel);
68 int (*prepare)(struct drm_panel *panel);
35 int (*enable)(struct drm_panel *panel); 69 int (*enable)(struct drm_panel *panel);
36 int (*get_modes)(struct drm_panel *panel); 70 int (*get_modes)(struct drm_panel *panel);
37}; 71};
@@ -46,6 +80,14 @@ struct drm_panel {
46 struct list_head list; 80 struct list_head list;
47}; 81};
48 82
83static inline int drm_panel_unprepare(struct drm_panel *panel)
84{
85 if (panel && panel->funcs && panel->funcs->unprepare)
86 return panel->funcs->unprepare(panel);
87
88 return panel ? -ENOSYS : -EINVAL;
89}
90
49static inline int drm_panel_disable(struct drm_panel *panel) 91static inline int drm_panel_disable(struct drm_panel *panel)
50{ 92{
51 if (panel && panel->funcs && panel->funcs->disable) 93 if (panel && panel->funcs && panel->funcs->disable)
@@ -54,6 +96,14 @@ static inline int drm_panel_disable(struct drm_panel *panel)
54 return panel ? -ENOSYS : -EINVAL; 96 return panel ? -ENOSYS : -EINVAL;
55} 97}
56 98
99static inline int drm_panel_prepare(struct drm_panel *panel)
100{
101 if (panel && panel->funcs && panel->funcs->prepare)
102 return panel->funcs->prepare(panel);
103
104 return panel ? -ENOSYS : -EINVAL;
105}
106
57static inline int drm_panel_enable(struct drm_panel *panel) 107static inline int drm_panel_enable(struct drm_panel *panel)
58{ 108{
59 if (panel && panel->funcs && panel->funcs->enable) 109 if (panel && panel->funcs && panel->funcs->enable)
@@ -62,6 +112,14 @@ static inline int drm_panel_enable(struct drm_panel *panel)
62 return panel ? -ENOSYS : -EINVAL; 112 return panel ? -ENOSYS : -EINVAL;
63} 113}
64 114
115static inline int drm_panel_get_modes(struct drm_panel *panel)
116{
117 if (panel && panel->funcs && panel->funcs->get_modes)
118 return panel->funcs->get_modes(panel);
119
120 return panel ? -ENOSYS : -EINVAL;
121}
122
65void drm_panel_init(struct drm_panel *panel); 123void drm_panel_init(struct drm_panel *panel);
66 124
67int drm_panel_add(struct drm_panel *panel); 125int drm_panel_add(struct drm_panel *panel);
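The kernel-doc above prescribes an ordering for the four callbacks; a hedged sketch of that sequence from a display controller driver's perspective, where my_start_video()/my_stop_video() stand in for the controller's own stream control and -ENOSYS handling is simplified:

static int my_display_on(struct drm_panel *panel)
{
	int ret;

	ret = drm_panel_prepare(panel);	/* power up before video */
	if (ret < 0)
		return ret;

	my_start_video();		/* controller starts scanout */

	return drm_panel_enable(panel);	/* then backlight on */
}

static void my_display_off(struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* backlight off first */
	my_stop_video();		/* stop the video stream */
	drm_panel_unprepare(panel);	/* finally power down */
}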
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index d1286297567b..26bb55e9e8b6 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -163,5 +163,11 @@ int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
163 struct drm_rect *dst, 163 struct drm_rect *dst,
164 int min_vscale, int max_vscale); 164 int min_vscale, int max_vscale);
165void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point); 165void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
166void drm_rect_rotate(struct drm_rect *r,
167 int width, int height,
168 unsigned int rotation);
169void drm_rect_rotate_inv(struct drm_rect *r,
170 int width, int height,
171 unsigned int rotation);
166 172
167#endif 173#endif
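drm_rect_rotate() and drm_rect_rotate_inv() transform a rectangle between the unrotated and rotated coordinate spaces of a width x height area; a hedged sketch, assuming the BIT(DRM_ROTATE_90) flag value from drm_crtc.h:

static void my_program_rotated_plane(struct drm_rect *src,
				     int fb_width, int fb_height)
{
	/* Map the source rect into the 90-degree-rotated view... */
	drm_rect_rotate(src, fb_width, fb_height, BIT(DRM_ROTATE_90));

	/* ...program the hardware with *src here... */

	/* ...and map it back for the unrotated bookkeeping. */
	drm_rect_rotate_inv(src, fb_width, fb_height, BIT(DRM_ROTATE_90));
}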
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index a5183da3ef92..202f0a7171e8 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -182,6 +182,7 @@ struct ttm_mem_type_manager_func {
182 * @man: Pointer to a memory type manager. 182 * @man: Pointer to a memory type manager.
183 * @bo: Pointer to the buffer object we're allocating space for. 183 * @bo: Pointer to the buffer object we're allocating space for.
184 * @placement: Placement details. 184 * @placement: Placement details.
185 * @flags: Additional placement flags.
185 * @mem: Pointer to a struct ttm_mem_reg to be filled in. 186 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
186 * 187 *
187 * This function should allocate space in the memory type managed 188 * This function should allocate space in the memory type managed
@@ -206,6 +207,7 @@ struct ttm_mem_type_manager_func {
206 int (*get_node)(struct ttm_mem_type_manager *man, 207 int (*get_node)(struct ttm_mem_type_manager *man,
207 struct ttm_buffer_object *bo, 208 struct ttm_buffer_object *bo,
208 struct ttm_placement *placement, 209 struct ttm_placement *placement,
210 uint32_t flags,
209 struct ttm_mem_reg *mem); 211 struct ttm_mem_reg *mem);
210 212
211 /** 213 /**
@@ -653,18 +655,6 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm);
653extern int ttm_tt_swapin(struct ttm_tt *ttm); 655extern int ttm_tt_swapin(struct ttm_tt *ttm);
654 656
655/** 657/**
656 * ttm_tt_cache_flush:
657 *
658 * @pages: An array of pointers to struct page:s to flush.
659 * @num_pages: Number of pages to flush.
660 *
661 * Flush the data of the indicated pages from the cpu caches.
662 * This is used when changing caching attributes of the pages from
663 * cache-coherent.
664 */
665extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
666
667/**
668 * ttm_tt_set_placement_caching: 658 * ttm_tt_set_placement_caching:
669 * 659 *
670 * @ttm A struct ttm_tt the backing pages of which will change caching policy. 660 * @ttm A struct ttm_tt the backing pages of which will change caching policy.
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index d2b52999e771..bb9840fd1e18 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -164,12 +164,15 @@ int host1x_job_submit(struct host1x_job *job);
164 */ 164 */
165 165
166struct host1x_reloc { 166struct host1x_reloc {
167 struct host1x_bo *cmdbuf; 167 struct {
168 u32 cmdbuf_offset; 168 struct host1x_bo *bo;
169 struct host1x_bo *target; 169 unsigned long offset;
170 u32 target_offset; 170 } cmdbuf;
171 u32 shift; 171 struct {
172 u32 pad; 172 struct host1x_bo *bo;
173 unsigned long offset;
174 } target;
175 unsigned long shift;
173}; 176};
174 177
175struct host1x_job { 178struct host1x_job {
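The relocation entry now groups each buffer with its own offset; a hedged initializer under the new layout (the cmdbuf_bo/target_bo parameters and the 0x30 offset are hypothetical):

static struct host1x_reloc my_make_reloc(struct host1x_bo *cmdbuf_bo,
					 struct host1x_bo *target_bo)
{
	struct host1x_reloc reloc = {
		.cmdbuf = { .bo = cmdbuf_bo, .offset = 0x30 },
		.target = { .bo = target_bo, .offset = 0 },
		.shift  = 0,	/* no address shift */
	};

	return reloc;
}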
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 9abbeb924cbb..b0b855613641 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -780,7 +780,7 @@ struct drm_prime_handle {
780 780
781/** 781/**
782 * Device specific ioctls should only be in their respective headers 782 * Device specific ioctls should only be in their respective headers
783 * The device specific ioctl range is from 0x40 to 0x99. 783 * The device specific ioctl range is from 0x40 to 0x9f.
784 * Generic IOCTLS restart at 0xA0. 784 * Generic IOCTLS restart at 0xA0.
785 * 785 *
786 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and 786 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index def54f9e07ca..a0db2d4aa5f0 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -88,6 +88,11 @@
88#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ 88#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
89#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ 89#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
90 90
91/* Picture aspect ratio options */
92#define DRM_MODE_PICTURE_ASPECT_NONE 0
93#define DRM_MODE_PICTURE_ASPECT_4_3 1
94#define DRM_MODE_PICTURE_ASPECT_16_9 2
95
91/* Dithering mode options */ 96/* Dithering mode options */
92#define DRM_MODE_DITHERING_OFF 0 97#define DRM_MODE_DITHERING_OFF 0
93#define DRM_MODE_DITHERING_ON 1 98#define DRM_MODE_DITHERING_ON 1
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 1cc0b610f162..509b2d7a41b7 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -796,7 +796,9 @@ struct drm_radeon_gem_info {
796 uint64_t vram_visible; 796 uint64_t vram_visible;
797}; 797};
798 798
799#define RADEON_GEM_NO_BACKING_STORE 1 799#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
800#define RADEON_GEM_GTT_UC (1 << 1)
801#define RADEON_GEM_GTT_WC (1 << 2)
800 802
801struct drm_radeon_gem_create { 803struct drm_radeon_gem_create {
802 uint64_t size; 804 uint64_t size;
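The backing-store define becomes an explicit bit and gains uncached/write-combined GTT siblings; a hedged userspace sketch, assuming the remaining drm_radeon_gem_create fields (alignment, handle, initial_domain) from radeon_drm.h:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

static int create_wc_gtt_bo(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_radeon_gem_create args = {
		.size = size,
		.alignment = 4096,
		.initial_domain = RADEON_GEM_DOMAIN_GTT,
		.flags = RADEON_GEM_GTT_WC,	/* write-combined GTT */
	};
	int ret = ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &args);

	if (ret == 0)
		*handle = args.handle;

	return ret;
}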
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index b75482112428..c15d781ecc0f 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -129,6 +129,44 @@ struct drm_tegra_submit {
129 __u32 reserved[5]; /* future expansion */ 129 __u32 reserved[5]; /* future expansion */
130}; 130};
131 131
132#define DRM_TEGRA_GEM_TILING_MODE_PITCH 0
133#define DRM_TEGRA_GEM_TILING_MODE_TILED 1
134#define DRM_TEGRA_GEM_TILING_MODE_BLOCK 2
135
136struct drm_tegra_gem_set_tiling {
137 /* input */
138 __u32 handle;
139 __u32 mode;
140 __u32 value;
141 __u32 pad;
142};
143
144struct drm_tegra_gem_get_tiling {
145 /* input */
146 __u32 handle;
147 /* output */
148 __u32 mode;
149 __u32 value;
150 __u32 pad;
151};
152
153#define DRM_TEGRA_GEM_BOTTOM_UP (1 << 0)
154#define DRM_TEGRA_GEM_FLAGS (DRM_TEGRA_GEM_BOTTOM_UP)
155
156struct drm_tegra_gem_set_flags {
157 /* input */
158 __u32 handle;
159 /* output */
160 __u32 flags;
161};
162
163struct drm_tegra_gem_get_flags {
164 /* input */
165 __u32 handle;
166 /* output */
167 __u32 flags;
168};
169
132#define DRM_TEGRA_GEM_CREATE 0x00 170#define DRM_TEGRA_GEM_CREATE 0x00
133#define DRM_TEGRA_GEM_MMAP 0x01 171#define DRM_TEGRA_GEM_MMAP 0x01
134#define DRM_TEGRA_SYNCPT_READ 0x02 172#define DRM_TEGRA_SYNCPT_READ 0x02
@@ -139,6 +177,10 @@ struct drm_tegra_submit {
139#define DRM_TEGRA_GET_SYNCPT 0x07 177#define DRM_TEGRA_GET_SYNCPT 0x07
140#define DRM_TEGRA_SUBMIT 0x08 178#define DRM_TEGRA_SUBMIT 0x08
141#define DRM_TEGRA_GET_SYNCPT_BASE 0x09 179#define DRM_TEGRA_GET_SYNCPT_BASE 0x09
180#define DRM_TEGRA_GEM_SET_TILING 0x0a
181#define DRM_TEGRA_GEM_GET_TILING 0x0b
182#define DRM_TEGRA_GEM_SET_FLAGS 0x0c
183#define DRM_TEGRA_GEM_GET_FLAGS 0x0d
142 184
143#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create) 185#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
144#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap) 186#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
@@ -150,5 +192,9 @@ struct drm_tegra_submit {
150#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt) 192#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
151#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit) 193#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
152#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base) 194#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
195#define DRM_IOCTL_TEGRA_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_TILING, struct drm_tegra_gem_set_tiling)
196#define DRM_IOCTL_TEGRA_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_TILING, struct drm_tegra_gem_get_tiling)
197#define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
198#define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
153 199
154#endif 200#endif
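A hedged userspace sketch for the new tiling ioctls; note the meaning of value for block mode (taken here as the log2 block height) is an assumption, not something the header spells out:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

static int set_block_tiling(int fd, uint32_t handle, uint32_t height_log2)
{
	struct drm_tegra_gem_set_tiling args = {
		.handle = handle,
		.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK,
		.value = height_log2,	/* assumed: block height, log2 */
	};

	return ioctl(fd, DRM_IOCTL_TEGRA_GEM_SET_TILING, &args);
}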
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index b0393209679b..eaad58b5be4a 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -19,6 +19,7 @@
19/* VIDCON0 */ 19/* VIDCON0 */
20 20
21#define VIDCON0 0x00 21#define VIDCON0 0x00
22#define VIDCON0_DSI_EN (1 << 30)
22#define VIDCON0_INTERLACE (1 << 29) 23#define VIDCON0_INTERLACE (1 << 29)
23#define VIDCON0_VIDOUT_MASK (0x7 << 26) 24#define VIDCON0_VIDOUT_MASK (0x7 << 26)
24#define VIDCON0_VIDOUT_SHIFT 26 25#define VIDCON0_VIDOUT_SHIFT 26
@@ -355,7 +356,7 @@
355#define VIDINTCON0_INT_ENABLE (1 << 0) 356#define VIDINTCON0_INT_ENABLE (1 << 0)
356 357
357#define VIDINTCON1 0x134 358#define VIDINTCON1 0x134
358#define VIDINTCON1_INT_I180 (1 << 2) 359#define VIDINTCON1_INT_I80 (1 << 2)
359#define VIDINTCON1_INT_FRAME (1 << 1) 360#define VIDINTCON1_INT_FRAME (1 << 1)
360#define VIDINTCON1_INT_FIFO (1 << 0) 361#define VIDINTCON1_INT_FIFO (1 << 0)
361 362